1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
 */
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
#endif
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/crc32c.h>
51 #include <linux/prefetch.h>
52 #include <linux/zlib.h>
55 #include "bnx2x_reg.h"
56 #include "bnx2x_fw_defs.h"
57 #include "bnx2x_hsi.h"
58 #include "bnx2x_link.h"
60 #include "bnx2x_init.h"
62 #define DRV_MODULE_VERSION "1.45.17"
63 #define DRV_MODULE_RELDATE "2008/08/13"
64 #define BNX2X_BC_VER 0x040200
66 /* Time in jiffies before concluding the transmitter is hung */
67 #define TX_TIMEOUT (5*HZ)
69 static char version[] __devinitdata =
70 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
71 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
73 MODULE_AUTHOR("Eliezer Tamir");
74 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
75 MODULE_LICENSE("GPL");
76 MODULE_VERSION(DRV_MODULE_VERSION);
78 static int disable_tpa;
82 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
85 module_param(disable_tpa, int, 0);
86 module_param(use_inta, int, 0);
87 module_param(poll, int, 0);
88 module_param(debug, int, 0);
89 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
90 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
91 MODULE_PARM_DESC(poll, "use polling (for debug)");
92 MODULE_PARM_DESC(debug, "default debug msglevel");
95 module_param(use_multi, int, 0);
96 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
99 enum bnx2x_board_type {
105 /* indexed by board_type, above */
108 } board_info[] __devinitdata = {
109 { "Broadcom NetXtreme II BCM57710 XGb" },
110 { "Broadcom NetXtreme II BCM57711 XGb" },
111 { "Broadcom NetXtreme II BCM57711E XGb" }
115 static const struct pci_device_id bnx2x_pci_tbl[] = {
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
125 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127 /****************************************************************************
128 * General service functions
129 ****************************************************************************/
132 * locking is done by mcp
134 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
139 PCICFG_VENDOR_ID_OFFSET);
142 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
146 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149 PCICFG_VENDOR_ID_OFFSET);
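/* Illustrative usage of the indirect access pair above (sketch only;
 * "offset" and "bit" are placeholders, not real register names):
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, offset);
 *	bnx2x_reg_wr_ind(bp, offset, val | bit);
 *
 * bnx2x_read_dmae() below falls back to bnx2x_reg_rd_ind() while DMAE
 * is not ready.
 */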
154 static const u32 dmae_reg_go_c[] = {
155 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
156 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
157 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
158 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
161 /* copy command into DMAE command memory and set DMAE command go */
162 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
168 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
169 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
170 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
173 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
175 REG_WR(bp, dmae_reg_go_c[idx], 1);
178 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
181 struct dmae_command *dmae = &bp->init_dmae;
182 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
185 if (!bp->dmae_ready) {
186 u32 *data = bnx2x_sp(bp, wb_data[0]);
188 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
189 " using indirect\n", dst_addr, len32);
190 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
194 mutex_lock(&bp->dmae_mutex);
196 memset(dmae, 0, sizeof(struct dmae_command));
198 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
199 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
200 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
202 DMAE_CMD_ENDIANITY_B_DW_SWAP |
204 DMAE_CMD_ENDIANITY_DW_SWAP |
206 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
207 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
208 dmae->src_addr_lo = U64_LO(dma_addr);
209 dmae->src_addr_hi = U64_HI(dma_addr);
210 dmae->dst_addr_lo = dst_addr >> 2;
211 dmae->dst_addr_hi = 0;
213 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_val = DMAE_COMP_VAL;
217 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
218 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
219 "dst_addr [%x:%08x (%08x)]\n"
220 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
221 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
222 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
223 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
224 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
225 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
226 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
230 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
234 while (*wb_comp != DMAE_COMP_VAL) {
235 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
238 BNX2X_ERR("dmae timeout!\n");
242 /* adjust delay for emulation/FPGA */
243 if (CHIP_REV_IS_SLOW(bp))
249 mutex_unlock(&bp->dmae_mutex);
252 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
254 struct dmae_command *dmae = &bp->init_dmae;
255 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
258 if (!bp->dmae_ready) {
259 u32 *data = bnx2x_sp(bp, wb_data[0]);
262 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
263 " using indirect\n", src_addr, len32);
264 for (i = 0; i < len32; i++)
265 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
269 mutex_lock(&bp->dmae_mutex);
271 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
272 memset(dmae, 0, sizeof(struct dmae_command));
274 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
275 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
276 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
278 DMAE_CMD_ENDIANITY_B_DW_SWAP |
280 DMAE_CMD_ENDIANITY_DW_SWAP |
282 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
283 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
284 dmae->src_addr_lo = src_addr >> 2;
285 dmae->src_addr_hi = 0;
286 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
287 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
289 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_val = DMAE_COMP_VAL;
293 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
294 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
295 "dst_addr [%x:%08x (%08x)]\n"
296 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
297 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
298 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
299 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
303 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
307 while (*wb_comp != DMAE_COMP_VAL) {
310 BNX2X_ERR("dmae timeout!\n");
314 /* adjust delay for emulation/FPGA */
315 if (CHIP_REV_IS_SLOW(bp))
320 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
321 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
322 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
324 mutex_unlock(&bp->dmae_mutex);
327 /* used only for slowpath so not inlined */
328 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
332 wb_write[0] = val_hi;
333 wb_write[1] = val_lo;
334 REG_WR_DMAE(bp, reg, wb_write, 2);
338 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
342 REG_RD_DMAE(bp, reg, wb_data, 2);
344 return HILO_U64(wb_data[0], wb_data[1]);
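/* How the wide-bus helpers pair up (illustrative sketch; "reg" stands
 * for a 64-bit wide-bus register that must be accessed through DMAE):
 *
 *	bnx2x_wb_wr(bp, reg, U64_HI(val64), U64_LO(val64));
 *	val64 = bnx2x_wb_rd(bp, reg);
 *
 * A plain REG_WR/REG_RD would only touch one 32-bit half.
 */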
348 static int bnx2x_mc_assert(struct bnx2x *bp)
352 u32 row0, row1, row2, row3;
355 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
356 XSTORM_ASSERT_LIST_INDEX_OFFSET);
358 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
360 /* print the asserts */
361 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
363 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
364 XSTORM_ASSERT_LIST_OFFSET(i));
365 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
366 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
367 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
369 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
372 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
373 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
374 " 0x%08x 0x%08x 0x%08x\n",
375 i, row3, row2, row1, row0);
383 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
384 TSTORM_ASSERT_LIST_INDEX_OFFSET);
386 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
388 /* print the asserts */
389 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
391 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
392 TSTORM_ASSERT_LIST_OFFSET(i));
393 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
394 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
395 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
397 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
400 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
401 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
402 " 0x%08x 0x%08x 0x%08x\n",
403 i, row3, row2, row1, row0);
411 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
412 CSTORM_ASSERT_LIST_INDEX_OFFSET);
414 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
416 /* print the asserts */
417 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
419 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
420 CSTORM_ASSERT_LIST_OFFSET(i));
421 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
422 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
423 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
425 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
428 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
429 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
430 " 0x%08x 0x%08x 0x%08x\n",
431 i, row3, row2, row1, row0);
439 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
440 USTORM_ASSERT_LIST_INDEX_OFFSET);
442 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
444 /* print the asserts */
445 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
447 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
448 USTORM_ASSERT_LIST_OFFSET(i));
449 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
450 USTORM_ASSERT_LIST_OFFSET(i) + 4);
451 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
452 USTORM_ASSERT_LIST_OFFSET(i) + 8);
453 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
454 USTORM_ASSERT_LIST_OFFSET(i) + 12);
456 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
457 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
458 " 0x%08x 0x%08x 0x%08x\n",
459 i, row3, row2, row1, row0);
469 static void bnx2x_fw_dump(struct bnx2x *bp)
475 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
476 mark = ((mark + 0x3) & ~0x3);
477 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
479 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
480 for (word = 0; word < 8; word++)
481 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
484 printk(KERN_CONT "%s", (char *)data);
486 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
487 for (word = 0; word < 8; word++)
488 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
491 printk(KERN_CONT "%s", (char *)data);
493 printk("\n" KERN_ERR PFX "end of fw dump\n");
496 static void bnx2x_panic_dump(struct bnx2x *bp)
501 bp->stats_state = STATS_STATE_DISABLED;
502 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
504 BNX2X_ERR("begin crash dump -----------------\n");
506 for_each_queue(bp, i) {
507 struct bnx2x_fastpath *fp = &bp->fp[i];
508 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
510 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
511 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
512 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
513 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
514 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
515 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
516 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
517 fp->rx_bd_prod, fp->rx_bd_cons,
518 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
519 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
520 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
521 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
522 " *sb_u_idx(%x) bd data(%x,%x)\n",
523 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
524 fp->status_blk->c_status_block.status_block_index,
526 fp->status_blk->u_status_block.status_block_index,
527 hw_prods->packets_prod, hw_prods->bds_prod);
529 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
530 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
531 for (j = start; j < end; j++) {
532 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
534 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
535 sw_bd->skb, sw_bd->first_bd);
538 start = TX_BD(fp->tx_bd_cons - 10);
539 end = TX_BD(fp->tx_bd_cons + 254);
540 for (j = start; j < end; j++) {
541 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
543 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
544 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
547 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
548 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
549 for (j = start; j < end; j++) {
550 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
551 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
553 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
554 j, rx_bd[1], rx_bd[0], sw_bd->skb);
557 start = RX_SGE(fp->rx_sge_prod);
558 end = RX_SGE(fp->last_max_sge);
559 for (j = start; j < end; j++) {
560 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
561 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
563 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
564 j, rx_sge[1], rx_sge[0], sw_page->page);
567 start = RCQ_BD(fp->rx_comp_cons - 10);
568 end = RCQ_BD(fp->rx_comp_cons + 503);
569 for (j = start; j < end; j++) {
570 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
572 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
573 j, cqe[0], cqe[1], cqe[2], cqe[3]);
577 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
578 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
579 " spq_prod_idx(%u)\n",
580 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
581 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
585 BNX2X_ERR("end crash dump -----------------\n");
588 static void bnx2x_int_enable(struct bnx2x *bp)
590 int port = BP_PORT(bp);
591 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
592 u32 val = REG_RD(bp, addr);
593 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
596 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
597 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
598 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
600 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
601 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
602 HC_CONFIG_0_REG_INT_LINE_EN_0 |
603 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
605 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
606 val, port, addr, msix);
608 REG_WR(bp, addr, val);
610 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
613 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
614 val, port, addr, msix);
616 REG_WR(bp, addr, val);
618 if (CHIP_IS_E1H(bp)) {
619 /* init leading/trailing edge */
621 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
623 /* enable nig attention */
628 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
629 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
633 static void bnx2x_int_disable(struct bnx2x *bp)
635 int port = BP_PORT(bp);
636 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
637 u32 val = REG_RD(bp, addr);
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
641 HC_CONFIG_0_REG_INT_LINE_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
644 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
647 REG_WR(bp, addr, val);
648 if (REG_RD(bp, addr) != val)
649 BNX2X_ERR("BUG! proper val not read from IGU!\n");
652 static void bnx2x_int_disable_sync(struct bnx2x *bp)
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657 /* disable interrupt handling */
658 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp);
662 /* make sure all ISRs are done */
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
670 synchronize_irq(bp->pdev->irq);
672 /* make sure sp_task is not running */
673 cancel_work_sync(&bp->sp_task);
679 * General service functions
682 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
683 u8 storm, u16 index, u8 op, u8 update)
685 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
686 COMMAND_REG_INT_ACK);
687 struct igu_ack_register igu_ack;
689 igu_ack.status_block_index = index;
690 igu_ack.sb_id_and_flags =
691 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
692 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
693 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
694 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
697 (*(u32 *)&igu_ack), hc_addr);
698 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
701 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703 struct host_status_block *fpsb = fp->status_blk;
706 barrier(); /* status block is written to by the chip */
707 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
708 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
711 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
712 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
718 static u16 bnx2x_ack_int(struct bnx2x *bp)
720 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
721 COMMAND_REG_SIMD_MASK);
722 u32 result = REG_RD(bp, hc_addr);
724 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
732 * fast path service functions
735 /* free skb in the packet ring at pos idx
736 * return idx of last bd freed
738 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
741 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
742 struct eth_tx_bd *tx_bd;
743 struct sk_buff *skb = tx_buf->skb;
744 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
747 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
751 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
752 tx_bd = &fp->tx_desc_ring[bd_idx];
753 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
754 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
756 nbd = le16_to_cpu(tx_bd->nbd) - 1;
757 new_cons = nbd + tx_buf->first_bd;
758 #ifdef BNX2X_STOP_ON_ERROR
759 if (nbd > (MAX_SKB_FRAGS + 2)) {
760 BNX2X_ERR("BAD nbd!\n");
765 /* Skip a parse bd and the TSO split header bd
766 since they have no mapping */
768 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
770 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
771 ETH_TX_BD_FLAGS_TCP_CSUM |
772 ETH_TX_BD_FLAGS_SW_LSO)) {
774 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
775 tx_bd = &fp->tx_desc_ring[bd_idx];
776 /* is this a TSO split header bd? */
777 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
779 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
786 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
787 tx_bd = &fp->tx_desc_ring[bd_idx];
788 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
789 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
791 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
797 tx_buf->first_bd = 0;
803 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
809 barrier(); /* Tell compiler that prod and cons can change */
810 prod = fp->tx_bd_prod;
811 cons = fp->tx_bd_cons;
813 /* NUM_TX_RINGS = number of "next-page" entries
814 It will be used as a threshold */
815 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
817 #ifdef BNX2X_STOP_ON_ERROR
819 WARN_ON(used > fp->bp->tx_ring_size);
820 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
823 return (s16)(fp->bp->tx_ring_size) - used;
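/* Worked example (numbers illustrative): with tx_ring_size = 4096,
 * prod = 110 and cons = 100, used = 10 + NUM_TX_RINGS and the function
 * returns 4096 - 10 - NUM_TX_RINGS. The NUM_TX_RINGS "next page" BDs
 * are thus always accounted as used and never offered to the stack.
 */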
826 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
828 struct bnx2x *bp = fp->bp;
829 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
832 #ifdef BNX2X_STOP_ON_ERROR
833 if (unlikely(bp->panic))
837 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
838 sw_cons = fp->tx_pkt_cons;
840 while (sw_cons != hw_cons) {
843 pkt_cons = TX_BD(sw_cons);
845 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
847 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
848 hw_cons, sw_cons, pkt_cons);
850 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
852 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
855 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
863 fp->tx_pkt_cons = sw_cons;
864 fp->tx_bd_cons = bd_cons;
866 /* Need to make the tx_cons update visible to start_xmit()
867 * before checking for netif_queue_stopped(). Without the
868 * memory barrier, there is a small possibility that start_xmit()
869 * will miss it and cause the queue to be stopped forever. */
smp_mb();
873 /* TBD need a thresh? */
874 if (unlikely(netif_queue_stopped(bp->dev))) {
876 netif_tx_lock(bp->dev);
878 if (netif_queue_stopped(bp->dev) &&
879 (bp->state == BNX2X_STATE_OPEN) &&
880 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
881 netif_wake_queue(bp->dev);
883 netif_tx_unlock(bp->dev);
888 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
889 union eth_rx_cqe *rr_cqe)
891 struct bnx2x *bp = fp->bp;
892 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
893 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
896 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
897 FP_IDX(fp), cid, command, bp->state,
898 rr_cqe->ramrod_cqe.ramrod_type);
903 switch (command | fp->state) {
904 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
905 BNX2X_FP_STATE_OPENING):
906 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
908 fp->state = BNX2X_FP_STATE_OPEN;
911 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
912 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
914 fp->state = BNX2X_FP_STATE_HALTED;
918 BNX2X_ERR("unexpected MC reply (%d) "
919 "fp->state is %x\n", command, fp->state);
922 mb(); /* force bnx2x_wait_ramrod() to see the change */
926 switch (command | bp->state) {
927 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
928 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
929 bp->state = BNX2X_STATE_OPEN;
932 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
933 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
934 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
935 fp->state = BNX2X_FP_STATE_HALTED;
938 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
939 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
940 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
944 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
945 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
946 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
947 bp->set_mac_pending = 0;
950 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
951 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
955 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
959 mb(); /* force bnx2x_wait_ramrod() to see the change */
962 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
963 struct bnx2x_fastpath *fp, u16 index)
965 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
966 struct page *page = sw_buf->page;
967 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
969 /* Skip "next page" elements */
973 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
974 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
975 __free_pages(page, PAGES_PER_SGE_SHIFT);
982 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
983 struct bnx2x_fastpath *fp, int last)
987 for (i = 0; i < last; i++)
988 bnx2x_free_rx_sge(bp, fp, i);
991 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
992 struct bnx2x_fastpath *fp, u16 index)
994 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
995 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
996 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
999 if (unlikely(page == NULL))
1002 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1003 PCI_DMA_FROMDEVICE);
1004 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1005 __free_pages(page, PAGES_PER_SGE_SHIFT);
1009 sw_buf->page = page;
1010 pci_unmap_addr_set(sw_buf, mapping, mapping);
1012 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1013 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1018 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1019 struct bnx2x_fastpath *fp, u16 index)
1021 struct sk_buff *skb;
1022 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1023 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1026 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1027 if (unlikely(skb == NULL))
1030 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1031 PCI_DMA_FROMDEVICE);
1032 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1038 pci_unmap_addr_set(rx_buf, mapping, mapping);
1040 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1041 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1046 /* note that we are not allocating a new skb,
1047 * we are just moving one from cons to prod;
1048 * we are not creating a new mapping,
1049 * so there is no need to check for dma_mapping_error() */
1051 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1052 struct sk_buff *skb, u16 cons, u16 prod)
1054 struct bnx2x *bp = fp->bp;
1055 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1056 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1057 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1058 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1060 pci_dma_sync_single_for_device(bp->pdev,
1061 pci_unmap_addr(cons_rx_buf, mapping),
1062 bp->rx_offset + RX_COPY_THRESH,
1063 PCI_DMA_FROMDEVICE);
1065 prod_rx_buf->skb = cons_rx_buf->skb;
1066 pci_unmap_addr_set(prod_rx_buf, mapping,
1067 pci_unmap_addr(cons_rx_buf, mapping));
1068 *prod_bd = *cons_bd;
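/* Sketch of how the rx path below picks between the buffer helpers:
 * on a jumbo-MTU device, packets with len <= RX_COPY_THRESH are copied
 * into a fresh skb and the original buffer is recycled with
 * bnx2x_reuse_rx_skb(); bigger packets are sent up the stack and a
 * replacement is posted with bnx2x_alloc_rx_skb().
 */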
1071 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1074 u16 last_max = fp->last_max_sge;
1076 if (SUB_S16(idx, last_max) > 0)
1077 fp->last_max_sge = idx;
1080 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1084 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1085 int idx = RX_SGE_CNT * i - 1;
1087 for (j = 0; j < 2; j++) {
1088 SGE_MASK_CLEAR_BIT(fp, idx);
1094 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1095 struct eth_fast_path_rx_cqe *fp_cqe)
1097 struct bnx2x *bp = fp->bp;
1098 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1099 le16_to_cpu(fp_cqe->len_on_bd)) >>
1101 u16 last_max, last_elem, first_elem;
1108 /* First mark all used pages */
1109 for (i = 0; i < sge_len; i++)
1110 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1112 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1113 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1115 /* Here we assume that the last SGE index is the biggest */
1116 prefetch((void *)(fp->sge_mask));
1117 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1119 last_max = RX_SGE(fp->last_max_sge);
1120 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1121 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1123 /* If ring is not full */
1124 if (last_elem + 1 != first_elem)
1127 /* Now update the prod */
1128 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1129 if (likely(fp->sge_mask[i]))
1132 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1133 delta += RX_SGE_MASK_ELEM_SZ;
1137 fp->rx_sge_prod += delta;
1138 /* clear page-end entries */
1139 bnx2x_clear_sge_mask_next_elems(fp);
1142 DP(NETIF_MSG_RX_STATUS,
1143 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1144 fp->last_max_sge, fp->rx_sge_prod);
1147 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1149 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1150 memset(fp->sge_mask, 0xff,
1151 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1153 /* Clear the two last indices in the page to 1:
1154 these are the indices that correspond to the "next" element,
1155 hence will never be indicated and should be removed from
1156 the calculations. */
1157 bnx2x_clear_sge_mask_next_elems(fp);
1160 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1161 struct sk_buff *skb, u16 cons, u16 prod)
1163 struct bnx2x *bp = fp->bp;
1164 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1165 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1166 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1169 /* move empty skb from pool to prod and map it */
1170 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1171 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1172 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1173 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1175 /* move partial skb from cons to pool (don't unmap yet) */
1176 fp->tpa_pool[queue] = *cons_rx_buf;
1178 /* mark bin state as start - print error if current state != stop */
1179 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1180 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1182 fp->tpa_state[queue] = BNX2X_TPA_START;
1184 /* point prod_bd to new skb */
1185 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1186 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1188 #ifdef BNX2X_STOP_ON_ERROR
1189 fp->tpa_queue_used |= (1 << queue);
1190 #ifdef __powerpc64__
1191 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1193 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1195 fp->tpa_queue_used);
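/* Lifecycle of a TPA bin (summary inferred from the surrounding code):
 * TPA_START parks the partially filled skb in fp->tpa_pool[queue] and
 * reposts an empty buffer in its ring slot; the FW keeps aggregating
 * into SGE pages until TPA_END, when bnx2x_tpa_stop() below attaches
 * the pages as frags and hands the aggregated skb to the stack.
 */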
1199 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1200 struct sk_buff *skb,
1201 struct eth_fast_path_rx_cqe *fp_cqe,
1204 struct sw_rx_page *rx_pg, old_rx_pg;
1206 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1207 u32 i, frag_len, frag_size, pages;
1211 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1212 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1214 /* This is needed in order to enable forwarding support */
1216 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1217 max(frag_size, (u32)len_on_bd));
1219 #ifdef BNX2X_STOP_ON_ERROR
1220 if (pages > 8*PAGES_PER_SGE) {
1221 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1223 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1224 fp_cqe->pkt_len, len_on_bd);
1230 /* Run through the SGL and compose the fragmented skb */
1231 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1232 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1234 /* FW gives the indices of the SGE as if the ring is an array
1235 (meaning that "next" element will consume 2 indices) */
1236 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1237 rx_pg = &fp->rx_page_ring[sge_idx];
1241 /* If we fail to allocate a substitute page, we simply stop
1242 where we are and drop the whole packet */
1243 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1244 if (unlikely(err)) {
1245 bp->eth_stats.rx_skb_alloc_failed++;
1249 /* Unmap the page as we are going to pass it to the stack */
1250 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1251 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1253 /* Add one frag and update the appropriate fields in the skb */
1254 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1256 skb->data_len += frag_len;
1257 skb->truesize += frag_len;
1258 skb->len += frag_len;
1260 frag_size -= frag_len;
1266 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1270 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1271 struct sk_buff *skb = rx_buf->skb;
1273 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1275 /* Unmap skb in the pool anyway, as we are going to change
1276 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails */
1278 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1279 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1281 if (likely(new_skb)) {
1282 /* fix ip xsum and give it to the stack */
1283 /* (no need to map the new skb) */
1286 prefetch(((char *)(skb)) + 128);
1288 #ifdef BNX2X_STOP_ON_ERROR
1289 if (pad + len > bp->rx_buf_size) {
1290 BNX2X_ERR("skb_put is about to fail... "
1291 "pad %d len %d rx_buf_size %d\n",
1292 pad, len, bp->rx_buf_size);
1298 skb_reserve(skb, pad);
1301 skb->protocol = eth_type_trans(skb, bp->dev);
1302 skb->ip_summed = CHECKSUM_UNNECESSARY;
1307 iph = (struct iphdr *)skb->data;
1309 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1312 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1313 &cqe->fast_path_cqe, cqe_idx)) {
1315 if ((bp->vlgrp != NULL) &&
1316 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1317 PARSING_FLAGS_VLAN))
1318 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1319 le16_to_cpu(cqe->fast_path_cqe.
1323 netif_receive_skb(skb);
1325 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1326 " - dropping packet!\n");
1330 bp->dev->last_rx = jiffies;
1332 /* put new skb in bin */
1333 fp->tpa_pool[queue].skb = new_skb;
1336 /* else drop the packet and keep the buffer in the bin */
1337 DP(NETIF_MSG_RX_STATUS,
1338 "Failed to allocate new skb - dropping packet!\n");
1339 bp->eth_stats.rx_skb_alloc_failed++;
1342 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1345 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1346 struct bnx2x_fastpath *fp,
1347 u16 bd_prod, u16 rx_comp_prod,
1350 struct tstorm_eth_rx_producers rx_prods = {0};
1353 /* Update producers */
1354 rx_prods.bd_prod = bd_prod;
1355 rx_prods.cqe_prod = rx_comp_prod;
1356 rx_prods.sge_prod = rx_sge_prod;
1358 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1359 REG_WR(bp, BAR_TSTRORM_INTMEM +
1360 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1361 ((u32 *)&rx_prods)[i]);
1363 DP(NETIF_MSG_RX_STATUS,
1364 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1365 bd_prod, rx_comp_prod, rx_sge_prod);
1368 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1370 struct bnx2x *bp = fp->bp;
1371 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1372 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1375 #ifdef BNX2X_STOP_ON_ERROR
1376 if (unlikely(bp->panic))
1380 /* CQ "next element" is of the size of the regular element,
1381 that's why it's ok here */
1382 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1383 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1386 bd_cons = fp->rx_bd_cons;
1387 bd_prod = fp->rx_bd_prod;
1388 bd_prod_fw = bd_prod;
1389 sw_comp_cons = fp->rx_comp_cons;
1390 sw_comp_prod = fp->rx_comp_prod;
1392 /* Memory barrier necessary as speculative reads of the rx
1393 * buffer can be ahead of the index in the status block */
rmb();
1397 DP(NETIF_MSG_RX_STATUS,
1398 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1399 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1401 while (sw_comp_cons != hw_comp_cons) {
1402 struct sw_rx_bd *rx_buf = NULL;
1403 struct sk_buff *skb;
1404 union eth_rx_cqe *cqe;
1408 comp_ring_cons = RCQ_BD(sw_comp_cons);
1409 bd_prod = RX_BD(bd_prod);
1410 bd_cons = RX_BD(bd_cons);
1412 cqe = &fp->rx_comp_ring[comp_ring_cons];
1413 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1415 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1416 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1417 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1418 cqe->fast_path_cqe.rss_hash_result,
1419 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1420 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1422 /* is this a slowpath msg? */
1423 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1424 bnx2x_sp_event(fp, cqe);
1427 /* this is an rx packet */
1429 rx_buf = &fp->rx_buf_ring[bd_cons];
1431 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1432 pad = cqe->fast_path_cqe.placement_offset;
1434 /* If CQE is marked both TPA_START and TPA_END
1435 it is a non-TPA CQE */
1436 if ((!fp->disable_tpa) &&
1437 (TPA_TYPE(cqe_fp_flags) !=
1438 (TPA_TYPE_START | TPA_TYPE_END))) {
1439 u16 queue = cqe->fast_path_cqe.queue_index;
1441 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1442 DP(NETIF_MSG_RX_STATUS,
1443 "calling tpa_start on queue %d\n",
1446 bnx2x_tpa_start(fp, queue, skb,
1451 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1452 DP(NETIF_MSG_RX_STATUS,
1453 "calling tpa_stop on queue %d\n",
1456 if (!BNX2X_RX_SUM_FIX(cqe))
1457 BNX2X_ERR("STOP on none TCP "
1460 /* This is a size of the linear data
1462 len = le16_to_cpu(cqe->fast_path_cqe.
1464 bnx2x_tpa_stop(bp, fp, queue, pad,
1465 len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1471 bnx2x_update_sge_prod(fp,
1472 &cqe->fast_path_cqe);
1477 pci_dma_sync_single_for_device(bp->pdev,
1478 pci_unmap_addr(rx_buf, mapping),
1479 pad + RX_COPY_THRESH,
1480 PCI_DMA_FROMDEVICE);
1482 prefetch(((char *)(skb)) + 128);
1484 /* is this an error packet? */
1485 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486 DP(NETIF_MSG_RX_ERR,
1487 "ERROR flags %x rx packet %u\n",
1488 cqe_fp_flags, sw_comp_cons);
1489 bp->eth_stats.rx_err_discard_pkt++;
1493 /* Since we don't have a jumbo ring
1494 * copy small packets if mtu > 1500
1496 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497 (len <= RX_COPY_THRESH)) {
1498 struct sk_buff *new_skb;
1500 new_skb = netdev_alloc_skb(bp->dev,
1502 if (new_skb == NULL) {
1503 DP(NETIF_MSG_RX_ERR,
1504 "ERROR packet dropped "
1505 "because of alloc failure\n");
1506 bp->eth_stats.rx_skb_alloc_failed++;
1511 skb_copy_from_linear_data_offset(skb, pad,
1512 new_skb->data + pad, len);
1513 skb_reserve(new_skb, pad);
1514 skb_put(new_skb, len);
1516 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1520 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521 pci_unmap_single(bp->pdev,
1522 pci_unmap_addr(rx_buf, mapping),
1523 bp->rx_buf_use_size,
1524 PCI_DMA_FROMDEVICE);
1525 skb_reserve(skb, pad);
1529 DP(NETIF_MSG_RX_ERR,
1530 "ERROR packet dropped because "
1531 "of alloc failure\n");
1532 bp->eth_stats.rx_skb_alloc_failed++;
1534 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1538 skb->protocol = eth_type_trans(skb, bp->dev);
1540 skb->ip_summed = CHECKSUM_NONE;
1542 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543 skb->ip_summed = CHECKSUM_UNNECESSARY;
1545 bp->eth_stats.hw_csum_err++;
1550 if ((bp->vlgrp != NULL) &&
1551 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552 PARSING_FLAGS_VLAN))
1553 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1557 netif_receive_skb(skb);
1559 bp->dev->last_rx = jiffies;
1564 bd_cons = NEXT_RX_IDX(bd_cons);
1565 bd_prod = NEXT_RX_IDX(bd_prod);
1566 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1569 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1572 if (rx_pkt == budget)
1576 fp->rx_bd_cons = bd_cons;
1577 fp->rx_bd_prod = bd_prod_fw;
1578 fp->rx_comp_cons = sw_comp_cons;
1579 fp->rx_comp_prod = sw_comp_prod;
1581 /* Update producers */
1582 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1584 mmiowb(); /* keep prod updates ordered */
1586 fp->rx_pkt += rx_pkt;
1592 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1594 struct bnx2x_fastpath *fp = fp_cookie;
1595 struct bnx2x *bp = fp->bp;
1596 struct net_device *dev = bp->dev;
1597 int index = FP_IDX(fp);
1599 /* Return here if interrupt is disabled */
1600 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1605 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606 index, FP_SB_ID(fp));
1607 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1609 #ifdef BNX2X_STOP_ON_ERROR
1610 if (unlikely(bp->panic))
1614 prefetch(fp->rx_cons_sb);
1615 prefetch(fp->tx_cons_sb);
1616 prefetch(&fp->status_blk->c_status_block.status_block_index);
1617 prefetch(&fp->status_blk->u_status_block.status_block_index);
1619 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1626 struct net_device *dev = dev_instance;
1627 struct bnx2x *bp = netdev_priv(dev);
1628 u16 status = bnx2x_ack_int(bp);
1631 /* Return here if interrupt is shared and it's not for us */
1632 if (unlikely(status == 0)) {
1633 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1636 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1638 /* Return here if interrupt is disabled */
1639 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1640 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1644 #ifdef BNX2X_STOP_ON_ERROR
1645 if (unlikely(bp->panic))
1649 mask = 0x2 << bp->fp[0].sb_id;
1650 if (status & mask) {
1651 struct bnx2x_fastpath *fp = &bp->fp[0];
1653 prefetch(fp->rx_cons_sb);
1654 prefetch(fp->tx_cons_sb);
1655 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656 prefetch(&fp->status_blk->u_status_block.status_block_index);
1658 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1664 if (unlikely(status & 0x1)) {
1665 schedule_work(&bp->sp_task);
1673 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1679 /* end of fast path */
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1686 * General service functions
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1692 u32 resource_bit = (1 << resource);
1693 int func = BP_FUNC(bp);
1694 u32 hw_lock_control_reg;
1697 /* Validating that the resource is within range */
1698 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1700 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1706 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1708 hw_lock_control_reg =
1709 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1712 /* Validating that the resource is not already taken */
1713 lock_status = REG_RD(bp, hw_lock_control_reg);
1714 if (lock_status & resource_bit) {
1715 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1716 lock_status, resource_bit);
1720 /* Try for 1 second every 5ms */
1721 for (cnt = 0; cnt < 200; cnt++) {
1722 /* Try to acquire the lock */
1723 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724 lock_status = REG_RD(bp, hw_lock_control_reg);
1725 if (lock_status & resource_bit)
1730 DP(NETIF_MSG_HW, "Timeout\n");
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1737 u32 resource_bit = (1 << resource);
1738 int func = BP_FUNC(bp);
1739 u32 hw_lock_control_reg;
1741 /* Validating that the resource is within range */
1742 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1744 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1750 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1752 hw_lock_control_reg =
1753 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1756 /* Validating that the resource is currently taken */
1757 lock_status = REG_RD(bp, hw_lock_control_reg);
1758 if (!(lock_status & resource_bit)) {
1759 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1760 lock_status, resource_bit);
1764 REG_WR(bp, hw_lock_control_reg, resource_bit);
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1771 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1773 mutex_lock(&bp->port.phy_mutex);
1775 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1782 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1784 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1788 mutex_unlock(&bp->port.phy_mutex);
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1793 /* The GPIO should be swapped if swap register is set and active */
1794 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1796 int gpio_shift = gpio_num +
1797 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798 u32 gpio_mask = (1 << gpio_shift);
1801 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1806 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807 /* read GPIO and mask except the float bits */
1808 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1811 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813 gpio_num, gpio_shift);
1814 /* clear FLOAT and set CLR */
1815 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1819 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821 gpio_num, gpio_shift);
1822 /* clear FLOAT and set SET */
1823 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1827 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829 gpio_num, gpio_shift);
1831 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1838 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
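/* Example call (sketch; the GPIO number and mode are illustrative):
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 */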
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1846 u32 spio_mask = (1 << spio_num);
1849 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850 (spio_num > MISC_REGISTERS_SPIO_7)) {
1851 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856 /* read SPIO and mask except the float bits */
1857 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1860 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862 /* clear FLOAT and set CLR */
1863 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1867 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869 /* clear FLOAT and set SET */
1870 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1874 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1877 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1884 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1892 switch (bp->link_vars.ieee_fc) {
1893 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1897 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1901 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902 bp->port.advertising |= ADVERTISED_Asym_Pause;
1905 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1911 static void bnx2x_link_report(struct bnx2x *bp)
1913 if (bp->link_vars.link_up) {
1914 if (bp->state == BNX2X_STATE_OPEN)
1915 netif_carrier_on(bp->dev);
1916 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1918 printk("%d Mbps ", bp->link_vars.line_speed);
1920 if (bp->link_vars.duplex == DUPLEX_FULL)
1921 printk("full duplex");
1923 printk("half duplex");
1925 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927 printk(", receive ");
1928 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929 printk("& transmit ");
1931 printk(", transmit ");
1933 printk("flow control ON");
1937 } else { /* link_down */
1938 netif_carrier_off(bp->dev);
1939 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1945 if (!BP_NOMCP(bp)) {
1948 /* Initialize link parameters structure variables */
1949 /* It is recommended to turn off RX FC for jumbo frames
1950 for better performance */
1952 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1953 else if (bp->dev->mtu > 5000)
1954 bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1956 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1958 bnx2x_acquire_phy_lock(bp);
1959 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1960 bnx2x_release_phy_lock(bp);
1962 if (bp->link_vars.link_up)
1963 bnx2x_link_report(bp);
1965 bnx2x_calc_fc_adv(bp);
1969 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1973 static void bnx2x_link_set(struct bnx2x *bp)
1975 if (!BP_NOMCP(bp)) {
1976 bnx2x_acquire_phy_lock(bp);
1977 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1978 bnx2x_release_phy_lock(bp);
1980 bnx2x_calc_fc_adv(bp);
1982 BNX2X_ERR("Bootcode is missing -not setting link\n");
1985 static void bnx2x__link_reset(struct bnx2x *bp)
1987 if (!BP_NOMCP(bp)) {
1988 bnx2x_acquire_phy_lock(bp);
1989 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1990 bnx2x_release_phy_lock(bp);
1992 BNX2X_ERR("Bootcode is missing -not resetting link\n");
1995 static u8 bnx2x_link_test(struct bnx2x *bp)
1999 bnx2x_acquire_phy_lock(bp);
2000 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2001 bnx2x_release_phy_lock(bp);
2006 /* Calculates the sum of vn_min_rates.
2007 It's needed for further normalizing of the min_rates.
2012 0 - if all the min_rates are 0.
2013 In the latter case the fairness algorithm should be deactivated.
2014 If not all min_rates are zero then those that are zeroes will be set to 1 */
2017 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2019 int i, port = BP_PORT(bp);
2023 for (i = 0; i < E1HVN_MAX; i++) {
2025 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2026 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2027 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2028 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2029 /* If min rate is zero - set it to 1 */
2031 vn_min_rate = DEF_MIN_RATE;
2035 wsum += vn_min_rate;
2039 /* ... only if all min rates are zeros - disable FAIRNESS */
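/* Worked example (made-up numbers): two visible vns with MIN_BW fields
 * of 4 and 0 give vn_min_rates of 4 * 100 = 400 and DEF_MIN_RATE, so
 * wsum = 400 + DEF_MIN_RATE; only if every visible vn has MIN_BW == 0
 * does the function report 0 and fairness gets disabled.
 */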
2046 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2049 struct cmng_struct_per_port *m_cmng_port)
2051 u32 r_param = port_rate / 8;
2052 int port = BP_PORT(bp);
2055 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2057 /* Enable minmax only if we are in e1hmf mode */
2059 u32 fair_periodic_timeout_usec;
2062 /* Enable rate shaping and fairness */
2063 m_cmng_port->flags.cmng_vn_enable = 1;
2064 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2065 m_cmng_port->flags.rate_shaping_enable = 1;
2068 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2069 " fairness will be disabled\n");
2071 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2072 m_cmng_port->rs_vars.rs_periodic_timeout =
2073 RS_PERIODIC_TIMEOUT_USEC / 4;
2075 /* this is the threshold below which no timer arming will occur
2076 1.25 coefficient is for the threshold to be a little bigger
2077 than the real time, to compensate for timer inaccuracy */
2078 m_cmng_port->rs_vars.rs_threshold =
2079 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2081 /* resolution of fairness timer */
2082 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2083 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2084 t_fair = T_FAIR_COEF / port_rate;
2086 /* this is the threshold below which we won't arm
2087 the timer anymore */
2088 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2090 /* we multiply by 1e3/8 to get bytes/msec.
2091 We don't want the credits to pass a credit
2092 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2093 m_cmng_port->fair_vars.upper_bound =
2094 r_param * t_fair * FAIR_MEM;
2095 /* since each tick is 4 usec */
2096 m_cmng_port->fair_vars.fairness_timeout =
2097 fair_periodic_timeout_usec / 4;
2100 /* Disable rate shaping and fairness */
2101 m_cmng_port->flags.cmng_vn_enable = 0;
2102 m_cmng_port->flags.fairness_enable = 0;
2103 m_cmng_port->flags.rate_shaping_enable = 0;
2106 "Single function mode minmax will be disabled\n");
2109 /* Store it to internal memory */
2110 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2111 REG_WR(bp, BAR_XSTRORM_INTMEM +
2112 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2113 ((u32 *)(m_cmng_port))[i]);
2116 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2117 u32 wsum, u16 port_rate,
2118 struct cmng_struct_per_port *m_cmng_port)
2120 struct rate_shaping_vars_per_vn m_rs_vn;
2121 struct fairness_vars_per_vn m_fair_vn;
2122 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2123 u16 vn_min_rate, vn_max_rate;
2126 /* If function is hidden - set min and max to zeroes */
2127 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2132 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2133 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2134 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2135 if current min rate is zero - set it to 1.
2136 This is a requirement of the algorithm. */
2137 if ((vn_min_rate == 0) && wsum)
2138 vn_min_rate = DEF_MIN_RATE;
2139 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2140 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2143 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2144 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2146 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2147 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2149 /* global vn counter - maximal Mbps for this vn */
2150 m_rs_vn.vn_counter.rate = vn_max_rate;
2152 /* quota - number of bytes transmitted in this period */
2153 m_rs_vn.vn_counter.quota =
2154 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2156 #ifdef BNX2X_PER_PROT_QOS
2157 /* per protocol counter */
2158 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2159 /* maximal Mbps for this protocol */
2160 m_rs_vn.protocol_counters[protocol].rate =
2161 protocol_max_rate[protocol];
2162 /* the quota in each timer period -
2163 number of bytes transmitted in this period */
2164 m_rs_vn.protocol_counters[protocol].quota =
2165 (u32)(rs_periodic_timeout_usec *
2167 protocol_counters[protocol].rate/8));
2172 /* credit for each period of the fairness algorithm:
2173 number of bytes in T_FAIR (the vn share the port rate).
2174 wsum should not be larger than 10000, thus
2175 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2176 m_fair_vn.vn_credit_delta =
2177 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2178 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2179 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2180 m_fair_vn.vn_credit_delta);
2183 #ifdef BNX2X_PER_PROT_QOS
2185 u32 protocolWeightSum = 0;
2187 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2188 protocolWeightSum +=
2189 drvInit.protocol_min_rate[protocol];
2190 /* per protocol counter -
2191 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2192 if (protocolWeightSum > 0) {
2194 protocol < NUM_OF_PROTOCOLS; protocol++)
2195 /* credit for each period of the
2196 fairness algorithm - number of bytes in
2197 T_FAIR (the protocol share the vn rate) */
2198 m_fair_vn.protocol_credit_delta[protocol] =
2199 (u32)((vn_min_rate / 8) * t_fair *
2200 protocol_min_rate / protocolWeightSum);
2205 /* Store it to internal memory */
2206 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2207 REG_WR(bp, BAR_XSTRORM_INTMEM +
2208 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2209 ((u32 *)(&m_rs_vn))[i]);
2211 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2212 REG_WR(bp, BAR_XSTRORM_INTMEM +
2213 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2214 ((u32 *)(&m_fair_vn))[i]);
2217 /* This function is called upon link interrupt */
2218 static void bnx2x_link_attn(struct bnx2x *bp)
2222 /* Make sure that we are synced with the current statistics */
2223 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2225 bnx2x_acquire_phy_lock(bp);
2226 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2227 bnx2x_release_phy_lock(bp);
2229 if (bp->link_vars.link_up) {
2231 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2232 struct host_port_stats *pstats;
2234 pstats = bnx2x_sp(bp, port_stats);
2235 /* reset old bmac stats */
2236 memset(&(pstats->mac_stx[0]), 0,
2237 sizeof(struct mac_stx));
2239 if ((bp->state == BNX2X_STATE_OPEN) ||
2240 (bp->state == BNX2X_STATE_DISABLED))
2241 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2244 /* indicate link status */
2245 bnx2x_link_report(bp);
2250 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2251 if (vn == BP_E1HVN(bp))
2254 func = ((vn << 1) | BP_PORT(bp));
2256 /* Set the attention towards other drivers
2258 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2259 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2263 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2264 struct cmng_struct_per_port m_cmng_port;
2266 int port = BP_PORT(bp);
2268 /* Init RATE SHAPING and FAIRNESS contexts */
2269 wsum = bnx2x_calc_vn_wsum(bp);
2270 bnx2x_init_port_minmax(bp, (int)wsum,
2271 bp->link_vars.line_speed,
2274 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2275 bnx2x_init_vn_minmax(bp, 2*vn + port,
2276 wsum, bp->link_vars.line_speed,
2281 static void bnx2x__link_status_update(struct bnx2x *bp)
2283 if (bp->state != BNX2X_STATE_OPEN)
2286 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2288 if (bp->link_vars.link_up)
2289 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
2297 static void bnx2x_pmf_update(struct bnx2x *bp)
2299 int port = BP_PORT(bp);
2303 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2305 /* enable nig attention */
2306 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2310 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2318 * General service functions
2321 /* the slow path queue is odd since completions arrive on the fastpath ring */
2322 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323 u32 data_hi, u32 data_lo, int common)
2325 int func = BP_FUNC(bp);
2327 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2329 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2333 #ifdef BNX2X_STOP_ON_ERROR
2334 if (unlikely(bp->panic))
2338 spin_lock_bh(&bp->spq_lock);
2340 if (!bp->spq_left) {
2341 BNX2X_ERR("BUG! SPQ ring full!\n");
2342 spin_unlock_bh(&bp->spq_lock);
2347 /* CID needs the port number to be encoded in it */
2348 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2351 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2353 bp->spq_prod_bd->hdr.type |=
2354 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2356 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2361 if (bp->spq_prod_bd == bp->spq_last_bd) {
2362 bp->spq_prod_bd = bp->spq;
2363 bp->spq_prod_idx = 0;
2364 DP(NETIF_MSG_TIMER, "end of spq\n");
2371 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2374 spin_unlock_bh(&bp->spq_lock);
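/* Typical call site (see bnx2x_storm_stats_post() below): a ramrod is
 * posted with its 64-bit data split into hi/lo dwords, e.g.
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 */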
2378 /* acquire split MCP access lock register */
2379 static int bnx2x_acquire_alr(struct bnx2x *bp)
2386 for (j = 0; j < i*10; j++) {
2388 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390 if (val & (1L << 31))
2395 if (!(val & (1L << 31))) {
2396 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2403 /* release split MCP access lock register */
2404 static void bnx2x_release_alr(struct bnx2x *bp)
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2411 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2413 struct host_def_status_block *def_sb = bp->def_status_blk;
2416 barrier(); /* status block is written to by the chip */
2417 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2421 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2425 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2429 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2433 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2441 * slow path service functions
2444 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2446 int port = BP_PORT(bp);
2447 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2448 COMMAND_REG_ATTN_BITS_SET);
2449 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2451 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452 NIG_REG_MASK_INTERRUPT_PORT0;
2455 if (bp->attn_state & asserted)
2456 BNX2X_ERR("IGU ERROR\n");
2458 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2459 aeu_mask = REG_RD(bp, aeu_addr);
2461 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2462 aeu_mask, asserted);
2463 aeu_mask &= ~(asserted & 0xff);
2464 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2466 REG_WR(bp, aeu_addr, aeu_mask);
2467 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2470 bp->attn_state |= asserted;
2471 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2473 if (asserted & ATTN_HARD_WIRED_MASK) {
2474 if (asserted & ATTN_NIG_FOR_FUNC) {
2476 /* save nig interrupt mask */
2477 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2478 REG_WR(bp, nig_int_mask_addr, 0);
2480 bnx2x_link_attn(bp);
2482 /* handle unicore attn? */
2484 if (asserted & ATTN_SW_TIMER_4_FUNC)
2485 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2487 if (asserted & GPIO_2_FUNC)
2488 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2490 if (asserted & GPIO_3_FUNC)
2491 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2493 if (asserted & GPIO_4_FUNC)
2494 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2497 if (asserted & ATTN_GENERAL_ATTN_1) {
2498 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2499 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2501 if (asserted & ATTN_GENERAL_ATTN_2) {
2502 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2503 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2505 if (asserted & ATTN_GENERAL_ATTN_3) {
2506 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2510 if (asserted & ATTN_GENERAL_ATTN_4) {
2511 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2512 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2514 if (asserted & ATTN_GENERAL_ATTN_5) {
2515 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2516 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2518 if (asserted & ATTN_GENERAL_ATTN_6) {
2519 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2520 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2524 } /* if hardwired */
2526 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2528 REG_WR(bp, hc_addr, asserted);
2530 /* now set back the mask */
2531 if (asserted & ATTN_NIG_FOR_FUNC)
2532 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2535 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2537 int port = BP_PORT(bp);
2541 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2542 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2544 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2546 val = REG_RD(bp, reg_offset);
2547 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2548 REG_WR(bp, reg_offset, val);
2550 BNX2X_ERR("SPIO5 hw attention\n");
2552 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2553 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2554 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2555 /* Fan failure attention */
2557 /* The PHY reset is controlled by GPIO 1 */
2558 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2559 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2560 /* Low power mode is controlled by GPIO 2 */
2561 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2562 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2563 /* mark the failure */
2564 bp->link_params.ext_phy_config &=
2565 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2566 bp->link_params.ext_phy_config |=
2567 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2569 dev_info.port_hw_config[port].
2570 external_phy_config,
2571 bp->link_params.ext_phy_config);
2572 /* log the failure */
2573 printk(KERN_ERR PFX "Fan Failure on Network"
2574 " Controller %s has caused the driver to"
2575 " shutdown the card to prevent permanent"
2576 " damage. Please contact Dell Support for"
2577 " assistance\n", bp->dev->name);
2585 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2587 val = REG_RD(bp, reg_offset);
2588 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2589 REG_WR(bp, reg_offset, val);
2591 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2592 (attn & HW_INTERRUT_ASSERT_SET_0));
2597 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2601 if (attn & BNX2X_DOORQ_ASSERT) {
2603 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2604 BNX2X_ERR("DB hw attention 0x%x\n", val);
2605 /* DORQ discard attention */
2607 BNX2X_ERR("FATAL error from DORQ\n");
2610 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2612 int port = BP_PORT(bp);
2615 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2616 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2618 val = REG_RD(bp, reg_offset);
2619 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2620 REG_WR(bp, reg_offset, val);
2622 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2623 (attn & HW_INTERRUT_ASSERT_SET_1));
2628 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2632 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2634 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2635 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2636 /* CFC error attention */
2638 BNX2X_ERR("FATAL error from CFC\n");
2641 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2643 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2644 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2645 /* RQ_USDMDP_FIFO_OVERFLOW */
2647 BNX2X_ERR("FATAL error from PXP\n");
2650 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2652 int port = BP_PORT(bp);
2655 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2656 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2658 val = REG_RD(bp, reg_offset);
2659 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2660 REG_WR(bp, reg_offset, val);
2662 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2663 (attn & HW_INTERRUT_ASSERT_SET_2));
2668 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2672 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2674 if (attn & BNX2X_PMF_LINK_ASSERT) {
2675 int func = BP_FUNC(bp);
2677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2678 bnx2x__link_status_update(bp);
2679 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2681 bnx2x_pmf_update(bp);
2683 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2685 BNX2X_ERR("MC assert!\n");
2686 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2688 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2689 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2692 } else if (attn & BNX2X_MCP_ASSERT) {
2694 BNX2X_ERR("MCP assert!\n");
2695 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2699 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2702 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2703 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2704 if (attn & BNX2X_GRC_TIMEOUT) {
2705 val = CHIP_IS_E1H(bp) ?
2706 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2707 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2709 if (attn & BNX2X_GRC_RSV) {
2710 val = CHIP_IS_E1H(bp) ?
2711 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2712 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2714 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2718 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2720 struct attn_route attn;
2721 struct attn_route group_mask;
2722 int port = BP_PORT(bp);
2728 /* need to take the HW lock because the MCP or the other port
2729 might also try to handle this event */
2730 bnx2x_acquire_alr(bp);
2732 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2733 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2734 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2735 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2736 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2737 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2739 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2740 if (deasserted & (1 << index)) {
2741 group_mask = bp->attn_group[index];
2743 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2744 index, group_mask.sig[0], group_mask.sig[1],
2745 group_mask.sig[2], group_mask.sig[3]);
2747 bnx2x_attn_int_deasserted3(bp,
2748 attn.sig[3] & group_mask.sig[3]);
2749 bnx2x_attn_int_deasserted1(bp,
2750 attn.sig[1] & group_mask.sig[1]);
2751 bnx2x_attn_int_deasserted2(bp,
2752 attn.sig[2] & group_mask.sig[2]);
2753 bnx2x_attn_int_deasserted0(bp,
2754 attn.sig[0] & group_mask.sig[0]);
2756 if ((attn.sig[0] & group_mask.sig[0] &
2757 HW_PRTY_ASSERT_SET_0) ||
2758 (attn.sig[1] & group_mask.sig[1] &
2759 HW_PRTY_ASSERT_SET_1) ||
2760 (attn.sig[2] & group_mask.sig[2] &
2761 HW_PRTY_ASSERT_SET_2))
2762 BNX2X_ERR("FATAL HW block parity attention\n");
2766 bnx2x_release_alr(bp);
2768 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2771 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2773 REG_WR(bp, reg_addr, val);
2775 if (~bp->attn_state & deasserted)
2776 BNX2X_ERR("IGU ERROR\n");
2778 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2779 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2781 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2782 aeu_mask = REG_RD(bp, reg_addr);
2784 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2785 aeu_mask, deasserted);
2786 aeu_mask |= (deasserted & 0xff);
2787 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2789 REG_WR(bp, reg_addr, aeu_mask);
2790 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2792 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2793 bp->attn_state &= ~deasserted;
2794 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2797 static void bnx2x_attn_int(struct bnx2x *bp)
2799 /* read local copy of bits */
2800 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2801 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2802 u32 attn_state = bp->attn_state;
2804 /* look for changed bits */
2805 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2806 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2809 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2810 attn_bits, attn_ack, asserted, deasserted);
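/* Worked example (illustrative bit values): attn_bits = 0x5,
 * attn_ack = 0x1 and attn_state = 0x1 give
 * asserted   = 0x5 & ~0x1 & ~0x1 = 0x4 (newly raised, not yet acked)
 * deasserted = ~0x5 & 0x1 & 0x1  = 0x0 (nothing de-asserted yet) */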
2812 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2813 BNX2X_ERR("BAD attention state\n");
2815 /* handle bits that were raised */
2817 bnx2x_attn_int_asserted(bp, asserted);
2820 bnx2x_attn_int_deasserted(bp, deasserted);
2823 static void bnx2x_sp_task(struct work_struct *work)
2825 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2829 /* Return here if interrupt is disabled */
2830 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2831 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2835 status = bnx2x_update_dsb_idx(bp);
2836 /* if (status == 0) */
2837 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2839 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2845 /* CStorm events: query_stats, port delete ramrod */
2847 bp->stats_pending = 0;
2849 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2851 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2853 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2855 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2857 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2862 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2864 struct net_device *dev = dev_instance;
2865 struct bnx2x *bp = netdev_priv(dev);
2867 /* Return here if interrupt is disabled */
2868 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2869 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2873 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2875 #ifdef BNX2X_STOP_ON_ERROR
2876 if (unlikely(bp->panic))
2880 schedule_work(&bp->sp_task);
2885 /* end of slow path */
2889 /****************************************************************************
2891 ****************************************************************************/
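/* The statistics below keep 64-bit counters as {hi,lo} pairs of u32s.
 * Worked example of the carry rule (illustrative values): adding lo
 * parts 0xffffffff + 0x2 wraps to 0x1, and since the new s_lo (0x1) is
 * less than a_lo (0x2), a carry of 1 is added to the hi part. */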
2893 /* sum[hi:lo] += add[hi:lo] */
2894 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2897 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2900 /* difference = minuend - subtrahend */
2901 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2903 if (m_lo < s_lo) { \
2905 d_hi = m_hi - s_hi; \
2907 /* we can 'borrow' 1 */ \
2909 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2911 /* m_hi <= s_hi */ \
2916 /* m_lo >= s_lo */ \
2917 if (m_hi < s_hi) { \
2921 /* m_hi >= s_hi */ \
2922 d_hi = m_hi - s_hi; \
2923 d_lo = m_lo - s_lo; \
2928 #define UPDATE_STAT64(s, t) \
2930 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2931 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2932 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2933 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2934 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2935 pstats->mac_stx[1].t##_lo, diff.lo); \
2938 #define UPDATE_STAT64_NIG(s, t) \
2940 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2941 diff.lo, new->s##_lo, old->s##_lo); \
2942 ADD_64(estats->t##_hi, diff.hi, \
2943 estats->t##_lo, diff.lo); \
2946 /* sum[hi:lo] += add */
2947 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2950 s_hi += (s_lo < a) ? 1 : 0; \
2953 #define UPDATE_EXTEND_STAT(s) \
2955 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2956 pstats->mac_stx[1].s##_lo, \
2960 #define UPDATE_EXTEND_TSTAT(s, t) \
2962 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2963 old_tclient->s = le32_to_cpu(tclient->s); \
2964 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2967 #define UPDATE_EXTEND_XSTAT(s, t) \
2969 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2970 old_xclient->s = le32_to_cpu(xclient->s); \
2971 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2975 * General service functions
2978 static inline long bnx2x_hilo(u32 *hiref)
2980 u32 lo = *(hiref + 1);
2981 #if (BITS_PER_LONG == 64)
2984 return HILO_U64(hi, lo);
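/* e.g. hi = 0x1, lo = 0x2 compose to 0x100000002 where long is 64 bits */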
2991 * Init service functions
2994 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2996 if (!bp->stats_pending) {
2997 struct eth_query_ramrod_data ramrod_data = {0};
3000 ramrod_data.drv_counter = bp->stats_counter++;
3001 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3002 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3004 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3005 ((u32 *)&ramrod_data)[1],
3006 ((u32 *)&ramrod_data)[0], 0);
3008 /* the stats ramrod has its own slot on the spq */
3010 bp->stats_pending = 1;
3015 static void bnx2x_stats_init(struct bnx2x *bp)
3017 int port = BP_PORT(bp);
3019 bp->executer_idx = 0;
3020 bp->stats_counter = 0;
3024 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3026 bp->port.port_stx = 0;
3027 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3029 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3030 bp->port.old_nig_stats.brb_discard =
3031 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032 bp->port.old_nig_stats.brb_truncate =
3033 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3034 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3035 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3036 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3037 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3039 /* function stats */
3040 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3041 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3042 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3043 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3045 bp->stats_state = STATS_STATE_DISABLED;
3046 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3047 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3050 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3052 struct dmae_command *dmae = &bp->stats_dmae;
3053 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3055 *stats_comp = DMAE_COMP_VAL;
3058 if (bp->executer_idx) {
3059 int loader_idx = PMF_DMAE_C(bp);
3061 memset(dmae, 0, sizeof(struct dmae_command));
3063 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3064 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3065 DMAE_CMD_DST_RESET |
3067 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3069 DMAE_CMD_ENDIANITY_DW_SWAP |
3071 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3073 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3074 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3075 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3076 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3077 sizeof(struct dmae_command) *
3078 (loader_idx + 1)) >> 2;
3079 dmae->dst_addr_hi = 0;
3080 dmae->len = sizeof(struct dmae_command) >> 2;
3083 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3084 dmae->comp_addr_hi = 0;
3088 bnx2x_post_dmae(bp, dmae, loader_idx);
3090 } else if (bp->func_stx) {
3092 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3096 static int bnx2x_stats_comp(struct bnx2x *bp)
3098 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3102 while (*stats_comp != DMAE_COMP_VAL) {
3104 BNX2X_ERR("timeout waiting for stats finished\n");
3114 * Statistics service functions
3117 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3119 struct dmae_command *dmae;
3121 int loader_idx = PMF_DMAE_C(bp);
3122 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3126 BNX2X_ERR("BUG!\n");
3130 bp->executer_idx = 0;
3132 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3134 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3136 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3138 DMAE_CMD_ENDIANITY_DW_SWAP |
3140 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3141 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3143 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3144 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3145 dmae->src_addr_lo = bp->port.port_stx >> 2;
3146 dmae->src_addr_hi = 0;
3147 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3148 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3149 dmae->len = DMAE_LEN32_RD_MAX;
3150 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3151 dmae->comp_addr_hi = 0;
3154 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3155 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3156 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3157 dmae->src_addr_hi = 0;
3158 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3159 DMAE_LEN32_RD_MAX * 4);
3160 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3161 DMAE_LEN32_RD_MAX * 4);
3162 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3163 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3164 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3165 dmae->comp_val = DMAE_COMP_VAL;
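/* The port stats area is larger than a single DMAE read allows
 * (DMAE_LEN32_RD_MAX dwords), hence the two chained commands above:
 * the first reads the maximal chunk, the second reads the remainder
 * starting DMAE_LEN32_RD_MAX dwords (DMAE_LEN32_RD_MAX * 4 bytes)
 * into the destination buffer. */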
3168 bnx2x_hw_stats_post(bp);
3169 bnx2x_stats_comp(bp);
3172 static void bnx2x_port_stats_init(struct bnx2x *bp)
3174 struct dmae_command *dmae;
3175 int port = BP_PORT(bp);
3176 int vn = BP_E1HVN(bp);
3178 int loader_idx = PMF_DMAE_C(bp);
3180 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3183 if (!bp->link_vars.link_up || !bp->port.pmf) {
3184 BNX2X_ERR("BUG!\n");
3188 bp->executer_idx = 0;
3191 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3192 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3193 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3195 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3197 DMAE_CMD_ENDIANITY_DW_SWAP |
3199 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3200 (vn << DMAE_CMD_E1HVN_SHIFT));
3202 if (bp->port.port_stx) {
3204 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3205 dmae->opcode = opcode;
3206 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3207 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3208 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3209 dmae->dst_addr_hi = 0;
3210 dmae->len = sizeof(struct host_port_stats) >> 2;
3211 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3212 dmae->comp_addr_hi = 0;
3218 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219 dmae->opcode = opcode;
3220 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3221 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3222 dmae->dst_addr_lo = bp->func_stx >> 2;
3223 dmae->dst_addr_hi = 0;
3224 dmae->len = sizeof(struct host_func_stats) >> 2;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226 dmae->comp_addr_hi = 0;
3231 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3232 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3233 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3235 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3237 DMAE_CMD_ENDIANITY_DW_SWAP |
3239 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240 (vn << DMAE_CMD_E1HVN_SHIFT));
3242 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3244 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3245 NIG_REG_INGRESS_BMAC0_MEM);
3247 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3248 BIGMAC_REGISTER_TX_STAT_GTBYT */
3249 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250 dmae->opcode = opcode;
3251 dmae->src_addr_lo = (mac_addr +
3252 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3253 dmae->src_addr_hi = 0;
3254 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3255 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3256 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3257 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3258 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3259 dmae->comp_addr_hi = 0;
3262 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3263 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3264 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265 dmae->opcode = opcode;
3266 dmae->src_addr_lo = (mac_addr +
3267 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3268 dmae->src_addr_hi = 0;
3269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3270 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3271 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3272 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3273 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3274 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3275 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3276 dmae->comp_addr_hi = 0;
3279 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3281 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3283 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3284 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285 dmae->opcode = opcode;
3286 dmae->src_addr_lo = (mac_addr +
3287 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3288 dmae->src_addr_hi = 0;
3289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3290 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3291 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3292 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293 dmae->comp_addr_hi = 0;
3296 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3297 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3298 dmae->opcode = opcode;
3299 dmae->src_addr_lo = (mac_addr +
3300 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3301 dmae->src_addr_hi = 0;
3302 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3303 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3304 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3305 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3307 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3308 dmae->comp_addr_hi = 0;
3311 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = opcode;
3314 dmae->src_addr_lo = (mac_addr +
3315 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3316 dmae->src_addr_hi = 0;
3317 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3318 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3319 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3320 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3321 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3322 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323 dmae->comp_addr_hi = 0;
3328 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329 dmae->opcode = opcode;
3330 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3331 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3332 dmae->src_addr_hi = 0;
3333 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3334 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3335 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3340 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341 dmae->opcode = opcode;
3342 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3343 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3344 dmae->src_addr_hi = 0;
3345 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3346 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3347 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3348 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3349 dmae->len = (2*sizeof(u32)) >> 2;
3350 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351 dmae->comp_addr_hi = 0;
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3356 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3357 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3359 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3361 DMAE_CMD_ENDIANITY_DW_SWAP |
3363 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3364 (vn << DMAE_CMD_E1HVN_SHIFT));
3365 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3366 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3367 dmae->src_addr_hi = 0;
3368 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3369 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3370 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3371 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3372 dmae->len = (2*sizeof(u32)) >> 2;
3373 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3374 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3375 dmae->comp_val = DMAE_COMP_VAL;
3380 static void bnx2x_func_stats_init(struct bnx2x *bp)
3382 struct dmae_command *dmae = &bp->stats_dmae;
3383 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3386 if (!bp->func_stx) {
3387 BNX2X_ERR("BUG!\n");
3391 bp->executer_idx = 0;
3392 memset(dmae, 0, sizeof(struct dmae_command));
3394 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3395 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3396 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3398 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3400 DMAE_CMD_ENDIANITY_DW_SWAP |
3402 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3403 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3404 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3405 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3406 dmae->dst_addr_lo = bp->func_stx >> 2;
3407 dmae->dst_addr_hi = 0;
3408 dmae->len = sizeof(struct host_func_stats) >> 2;
3409 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3410 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3411 dmae->comp_val = DMAE_COMP_VAL;
3416 static void bnx2x_stats_start(struct bnx2x *bp)
3419 bnx2x_port_stats_init(bp);
3421 else if (bp->func_stx)
3422 bnx2x_func_stats_init(bp);
3424 bnx2x_hw_stats_post(bp);
3425 bnx2x_storm_stats_post(bp);
3428 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3430 bnx2x_stats_comp(bp);
3431 bnx2x_stats_pmf_update(bp);
3432 bnx2x_stats_start(bp);
3435 static void bnx2x_stats_restart(struct bnx2x *bp)
3437 bnx2x_stats_comp(bp);
3438 bnx2x_stats_start(bp);
3441 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3443 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3444 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3445 struct regpair diff;
3447 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3448 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3449 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3450 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3451 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3452 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3453 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3455 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3457 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3458 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3459 UPDATE_STAT64(tx_stat_gt127,
3460 tx_stat_etherstatspkts65octetsto127octets);
3461 UPDATE_STAT64(tx_stat_gt255,
3462 tx_stat_etherstatspkts128octetsto255octets);
3463 UPDATE_STAT64(tx_stat_gt511,
3464 tx_stat_etherstatspkts256octetsto511octets);
3465 UPDATE_STAT64(tx_stat_gt1023,
3466 tx_stat_etherstatspkts512octetsto1023octets);
3467 UPDATE_STAT64(tx_stat_gt1518,
3468 tx_stat_etherstatspkts1024octetsto1522octets);
3469 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3470 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3471 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3472 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3473 UPDATE_STAT64(tx_stat_gterr,
3474 tx_stat_dot3statsinternalmactransmiterrors);
3475 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3478 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3480 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3481 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3483 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3484 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3485 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3486 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3487 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3488 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3489 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3490 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3491 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3492 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3493 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3494 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3495 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3496 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3497 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3498 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3499 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3500 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3501 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3506 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3516 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3518 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3519 struct nig_stats *old = &(bp->port.old_nig_stats);
3520 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3521 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3522 struct regpair diff;
3524 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3525 bnx2x_bmac_stats_update(bp);
3527 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3528 bnx2x_emac_stats_update(bp);
3530 else { /* unreached */
3531 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3535 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3536 new->brb_discard - old->brb_discard);
3537 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538 new->brb_truncate - old->brb_truncate);
3540 UPDATE_STAT64_NIG(egress_mac_pkt0,
3541 etherstatspkts1024octetsto1522octets);
3542 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3544 memcpy(old, new, sizeof(struct nig_stats));
3546 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3547 sizeof(struct mac_stx));
3548 estats->brb_drop_hi = pstats->brb_drop_hi;
3549 estats->brb_drop_lo = pstats->brb_drop_lo;
3551 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3556 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3558 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3559 int cl_id = BP_CL_ID(bp);
3560 struct tstorm_per_port_stats *tport =
3561 &stats->tstorm_common.port_statistics;
3562 struct tstorm_per_client_stats *tclient =
3563 &stats->tstorm_common.client_statistics[cl_id];
3564 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3565 struct xstorm_per_client_stats *xclient =
3566 &stats->xstorm_common.client_statistics[cl_id];
3567 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3568 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3569 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3572 /* are storm stats valid? */
3573 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3574 bp->stats_counter) {
3575 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3576 " tstorm counter (%d) != stats_counter (%d)\n",
3577 tclient->stats_counter, bp->stats_counter);
3580 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3581 bp->stats_counter) {
3582 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3583 " xstorm counter (%d) != stats_counter (%d)\n",
3584 xclient->stats_counter, bp->stats_counter);
3588 fstats->total_bytes_received_hi =
3589 fstats->valid_bytes_received_hi =
3590 le32_to_cpu(tclient->total_rcv_bytes.hi);
3591 fstats->total_bytes_received_lo =
3592 fstats->valid_bytes_received_lo =
3593 le32_to_cpu(tclient->total_rcv_bytes.lo);
3595 estats->error_bytes_received_hi =
3596 le32_to_cpu(tclient->rcv_error_bytes.hi);
3597 estats->error_bytes_received_lo =
3598 le32_to_cpu(tclient->rcv_error_bytes.lo);
3599 ADD_64(estats->error_bytes_received_hi,
3600 estats->rx_stat_ifhcinbadoctets_hi,
3601 estats->error_bytes_received_lo,
3602 estats->rx_stat_ifhcinbadoctets_lo);
3604 ADD_64(fstats->total_bytes_received_hi,
3605 estats->error_bytes_received_hi,
3606 fstats->total_bytes_received_lo,
3607 estats->error_bytes_received_lo);
3609 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3610 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3611 total_multicast_packets_received);
3612 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3613 total_broadcast_packets_received);
3615 fstats->total_bytes_transmitted_hi =
3616 le32_to_cpu(xclient->total_sent_bytes.hi);
3617 fstats->total_bytes_transmitted_lo =
3618 le32_to_cpu(xclient->total_sent_bytes.lo);
3620 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3621 total_unicast_packets_transmitted);
3622 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3623 total_multicast_packets_transmitted);
3624 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3625 total_broadcast_packets_transmitted);
3627 memcpy(estats, &(fstats->total_bytes_received_hi),
3628 sizeof(struct host_func_stats) - 2*sizeof(u32));
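/* this copies everything bracketed by the host_func_stats_start/end
 * markers; the 2*sizeof(u32) trimmed from the length presumably
 * accounts for those two marker dwords */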
3630 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3631 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3632 estats->brb_truncate_discard =
3633 le32_to_cpu(tport->brb_truncate_discard);
3634 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3636 old_tclient->rcv_unicast_bytes.hi =
3637 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3638 old_tclient->rcv_unicast_bytes.lo =
3639 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3640 old_tclient->rcv_broadcast_bytes.hi =
3641 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3642 old_tclient->rcv_broadcast_bytes.lo =
3643 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3644 old_tclient->rcv_multicast_bytes.hi =
3645 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3646 old_tclient->rcv_multicast_bytes.lo =
3647 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3648 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3650 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3651 old_tclient->packets_too_big_discard =
3652 le32_to_cpu(tclient->packets_too_big_discard);
3653 estats->no_buff_discard =
3654 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3655 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3657 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3658 old_xclient->unicast_bytes_sent.hi =
3659 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3660 old_xclient->unicast_bytes_sent.lo =
3661 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3662 old_xclient->multicast_bytes_sent.hi =
3663 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3664 old_xclient->multicast_bytes_sent.lo =
3665 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3666 old_xclient->broadcast_bytes_sent.hi =
3667 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3668 old_xclient->broadcast_bytes_sent.lo =
3669 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3671 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3676 static void bnx2x_net_stats_update(struct bnx2x *bp)
3678 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3679 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3680 struct net_device_stats *nstats = &bp->dev->stats;
3682 nstats->rx_packets =
3683 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3684 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3685 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3687 nstats->tx_packets =
3688 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3689 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3690 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3692 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3694 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3696 nstats->rx_dropped = old_tclient->checksum_discard +
3697 estats->mac_discard;
3698 nstats->tx_dropped = 0;
3701 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3703 nstats->collisions =
3704 estats->tx_stat_dot3statssinglecollisionframes_lo +
3705 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3706 estats->tx_stat_dot3statslatecollisions_lo +
3707 estats->tx_stat_dot3statsexcessivecollisions_lo;
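/* Note: only the lo dwords feed the collision count here -- an
 * assumption that each counter stays below 2^32 between resets */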
3709 estats->jabber_packets_received =
3710 old_tclient->packets_too_big_discard +
3711 estats->rx_stat_dot3statsframestoolong_lo;
3713 nstats->rx_length_errors =
3714 estats->rx_stat_etherstatsundersizepkts_lo +
3715 estats->jabber_packets_received;
3716 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3718 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3719 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3720 nstats->rx_missed_errors = estats->xxoverflow_discard;
3722 nstats->rx_errors = nstats->rx_length_errors +
3723 nstats->rx_over_errors +
3724 nstats->rx_crc_errors +
3725 nstats->rx_frame_errors +
3726 nstats->rx_fifo_errors +
3727 nstats->rx_missed_errors;
3729 nstats->tx_aborted_errors =
3730 estats->tx_stat_dot3statslatecollisions_lo +
3731 estats->tx_stat_dot3statsexcessivecollisions_lo;
3732 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3733 nstats->tx_fifo_errors = 0;
3734 nstats->tx_heartbeat_errors = 0;
3735 nstats->tx_window_errors = 0;
3737 nstats->tx_errors = nstats->tx_aborted_errors +
3738 nstats->tx_carrier_errors;
3741 static void bnx2x_stats_update(struct bnx2x *bp)
3743 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3746 if (*stats_comp != DMAE_COMP_VAL)
3750 update = (bnx2x_hw_stats_update(bp) == 0);
3752 update |= (bnx2x_storm_stats_update(bp) == 0);
3755 bnx2x_net_stats_update(bp);
3758 if (bp->stats_pending) {
3759 bp->stats_pending++;
3760 if (bp->stats_pending == 3) {
3761 BNX2X_ERR("stats not updated for 3 times\n");
3768 if (bp->msglevel & NETIF_MSG_TIMER) {
3769 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3770 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3771 struct net_device_stats *nstats = &bp->dev->stats;
3774 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3775 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3777 bnx2x_tx_avail(bp->fp),
3778 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3779 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3781 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3782 bp->fp->rx_comp_cons),
3783 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3784 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3785 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3786 estats->driver_xoff, estats->brb_drop_lo);
3787 printk(KERN_DEBUG "tstats: checksum_discard %u "
3788 "packets_too_big_discard %u no_buff_discard %u "
3789 "mac_discard %u mac_filter_discard %u "
3790 "xxovrflow_discard %u brb_truncate_discard %u "
3791 "ttl0_discard %u\n",
3792 old_tclient->checksum_discard,
3793 old_tclient->packets_too_big_discard,
3794 old_tclient->no_buff_discard, estats->mac_discard,
3795 estats->mac_filter_discard, estats->xxoverflow_discard,
3796 estats->brb_truncate_discard,
3797 old_tclient->ttl0_discard);
3799 for_each_queue(bp, i) {
3800 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3801 bnx2x_fp(bp, i, tx_pkt),
3802 bnx2x_fp(bp, i, rx_pkt),
3803 bnx2x_fp(bp, i, rx_calls));
3807 bnx2x_hw_stats_post(bp);
3808 bnx2x_storm_stats_post(bp);
3811 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3813 struct dmae_command *dmae;
3815 int loader_idx = PMF_DMAE_C(bp);
3816 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3818 bp->executer_idx = 0;
3820 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3822 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3824 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3826 DMAE_CMD_ENDIANITY_DW_SWAP |
3828 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3829 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3831 if (bp->port.port_stx) {
3833 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3835 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3837 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3838 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3839 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3840 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3841 dmae->dst_addr_hi = 0;
3842 dmae->len = sizeof(struct host_port_stats) >> 2;
3844 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3845 dmae->comp_addr_hi = 0;
3848 dmae->comp_addr_lo =
3849 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3850 dmae->comp_addr_hi =
3851 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3852 dmae->comp_val = DMAE_COMP_VAL;
3860 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3861 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3862 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3863 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3864 dmae->dst_addr_lo = bp->func_stx >> 2;
3865 dmae->dst_addr_hi = 0;
3866 dmae->len = sizeof(struct host_func_stats) >> 2;
3867 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3868 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3869 dmae->comp_val = DMAE_COMP_VAL;
3875 static void bnx2x_stats_stop(struct bnx2x *bp)
3879 bnx2x_stats_comp(bp);
3882 update = (bnx2x_hw_stats_update(bp) == 0);
3884 update |= (bnx2x_storm_stats_update(bp) == 0);
3887 bnx2x_net_stats_update(bp);
3890 bnx2x_port_stats_stop(bp);
3892 bnx2x_hw_stats_post(bp);
3893 bnx2x_stats_comp(bp);
3897 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3901 static const struct {
3902 void (*action)(struct bnx2x *bp);
3903 enum bnx2x_stats_state next_state;
3904 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3907 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3908 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3909 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3910 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3913 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3914 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3915 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3916 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3920 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3922 enum bnx2x_stats_state state = bp->stats_state;
3924 bnx2x_stats_stm[state][event].action(bp);
3925 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3927 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3928 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3929 state, event, bp->stats_state);
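/* Example walk through the table above:
 * DISABLED --LINK_UP--> bnx2x_stats_start() --> ENABLED,
 * ENABLED  --STOP-----> bnx2x_stats_stop()  --> DISABLED */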
3932 static void bnx2x_timer(unsigned long data)
3934 struct bnx2x *bp = (struct bnx2x *) data;
3936 if (!netif_running(bp->dev))
3939 if (atomic_read(&bp->intr_sem) != 0)
3943 struct bnx2x_fastpath *fp = &bp->fp[0];
3946 bnx2x_tx_int(fp, 1000);
3947 rc = bnx2x_rx_int(fp, 1000);
3950 if (!BP_NOMCP(bp)) {
3951 int func = BP_FUNC(bp);
3955 ++bp->fw_drv_pulse_wr_seq;
3956 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3957 /* TBD - add SYSTEM_TIME */
3958 drv_pulse = bp->fw_drv_pulse_wr_seq;
3959 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3961 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3962 MCP_PULSE_SEQ_MASK);
3963 /* The delta between driver pulse and mcp response
3964 * should be 1 (before mcp response) or 0 (after mcp response)
3966 if ((drv_pulse != mcp_pulse) &&
3967 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3968 /* someone lost a heartbeat... */
3969 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3970 drv_pulse, mcp_pulse);
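/* Example (illustrative): drv_pulse = 0x12 is healthy against
 * mcp_pulse = 0x12 (already answered) or 0x11 (answer pending);
 * any other pairing means a lost heartbeat */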
3974 if ((bp->state == BNX2X_STATE_OPEN) ||
3975 (bp->state == BNX2X_STATE_DISABLED))
3976 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3979 mod_timer(&bp->timer, jiffies + bp->current_interval);
3982 /* end of Statistics */
3987 * nic init service functions
3990 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3992 int port = BP_PORT(bp);
3994 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3995 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3996 sizeof(struct ustorm_status_block)/4);
3997 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3998 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3999 sizeof(struct cstorm_status_block)/4);
4002 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4003 dma_addr_t mapping, int sb_id)
4005 int port = BP_PORT(bp);
4006 int func = BP_FUNC(bp);
4011 section = ((u64)mapping) + offsetof(struct host_status_block,
4013 sb->u_status_block.status_block_id = sb_id;
4015 REG_WR(bp, BAR_USTRORM_INTMEM +
4016 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4017 REG_WR(bp, BAR_USTRORM_INTMEM +
4018 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4020 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4021 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4023 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4024 REG_WR16(bp, BAR_USTRORM_INTMEM +
4025 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
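/* all HC indices start out disabled (flag written as 1);
 * bnx2x_update_coalesce() below re-enables the Rx/Tx indices that
 * have a non-zero coalescing value configured */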
4028 section = ((u64)mapping) + offsetof(struct host_status_block,
4030 sb->c_status_block.status_block_id = sb_id;
4032 REG_WR(bp, BAR_CSTRORM_INTMEM +
4033 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4034 REG_WR(bp, BAR_CSTRORM_INTMEM +
4035 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4037 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4038 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4040 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4041 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4042 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4044 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4047 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4049 int func = BP_FUNC(bp);
4051 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4052 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4053 sizeof(struct ustorm_def_status_block)/4);
4054 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4055 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4056 sizeof(struct cstorm_def_status_block)/4);
4057 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4058 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4059 sizeof(struct xstorm_def_status_block)/4);
4060 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4061 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4062 sizeof(struct tstorm_def_status_block)/4);
4065 static void bnx2x_init_def_sb(struct bnx2x *bp,
4066 struct host_def_status_block *def_sb,
4067 dma_addr_t mapping, int sb_id)
4069 int port = BP_PORT(bp);
4070 int func = BP_FUNC(bp);
4071 int index, val, reg_offset;
4075 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4076 atten_status_block);
4077 def_sb->atten_status_block.status_block_id = sb_id;
4081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4082 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4084 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4085 bp->attn_group[index].sig[0] = REG_RD(bp,
4086 reg_offset + 0x10*index);
4087 bp->attn_group[index].sig[1] = REG_RD(bp,
4088 reg_offset + 0x4 + 0x10*index);
4089 bp->attn_group[index].sig[2] = REG_RD(bp,
4090 reg_offset + 0x8 + 0x10*index);
4091 bp->attn_group[index].sig[3] = REG_RD(bp,
4092 reg_offset + 0xc + 0x10*index);
4095 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4096 HC_REG_ATTN_MSG0_ADDR_L);
4098 REG_WR(bp, reg_offset, U64_LO(section));
4099 REG_WR(bp, reg_offset + 4, U64_HI(section));
4101 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4103 val = REG_RD(bp, reg_offset);
4105 REG_WR(bp, reg_offset, val);
4108 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4109 u_def_status_block);
4110 def_sb->u_def_status_block.status_block_id = sb_id;
4112 REG_WR(bp, BAR_USTRORM_INTMEM +
4113 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4114 REG_WR(bp, BAR_USTRORM_INTMEM +
4115 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4117 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4118 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4120 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4121 REG_WR16(bp, BAR_USTRORM_INTMEM +
4122 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4125 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4126 c_def_status_block);
4127 def_sb->c_def_status_block.status_block_id = sb_id;
4129 REG_WR(bp, BAR_CSTRORM_INTMEM +
4130 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4131 REG_WR(bp, BAR_CSTRORM_INTMEM +
4132 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4134 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4135 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4137 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4138 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4139 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4142 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4143 t_def_status_block);
4144 def_sb->t_def_status_block.status_block_id = sb_id;
4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
4147 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4148 REG_WR(bp, BAR_TSTRORM_INTMEM +
4149 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4151 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4152 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4154 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4155 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4156 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4159 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4160 x_def_status_block);
4161 def_sb->x_def_status_block.status_block_id = sb_id;
4163 REG_WR(bp, BAR_XSTRORM_INTMEM +
4164 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4165 REG_WR(bp, BAR_XSTRORM_INTMEM +
4166 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4168 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4169 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4171 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4172 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4173 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4175 bp->stats_pending = 0;
4176 bp->set_mac_pending = 0;
4178 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
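/* Programs the per-index host-coalescing parameters for each queue's
 * status block.  Note the "ticks ? 0 : 1" pattern below: a tick value of
 * 0 writes 1 to the matching HC "disable" word, switching interrupt
 * coalescing off for that index entirely.
 */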
4181 static void bnx2x_update_coalesce(struct bnx2x *bp)
4183 int port = BP_PORT(bp);
4186 for_each_queue(bp, i) {
4187 int sb_id = bp->fp[i].sb_id;
4189 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4190 REG_WR8(bp, BAR_USTRORM_INTMEM +
4191 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4192 U_SB_ETH_RX_CQ_INDEX),
4194 REG_WR16(bp, BAR_USTRORM_INTMEM +
4195 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4196 U_SB_ETH_RX_CQ_INDEX),
4197 bp->rx_ticks ? 0 : 1);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 U_SB_ETH_RX_BD_INDEX),
4201 bp->rx_ticks ? 0 : 1);
4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4206 C_SB_ETH_TX_CQ_INDEX),
4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210 C_SB_ETH_TX_CQ_INDEX),
4211 bp->tx_ticks ? 0 : 1);
4215 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4216 struct bnx2x_fastpath *fp, int last)
4220 for (i = 0; i < last; i++) {
4221 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4222 struct sk_buff *skb = rx_buf->skb;
4225 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4229 if (fp->tpa_state[i] == BNX2X_TPA_START)
4230 pci_unmap_single(bp->pdev,
4231 pci_unmap_addr(rx_buf, mapping),
4232 bp->rx_buf_use_size,
4233 PCI_DMA_FROMDEVICE);
4240 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4242 int func = BP_FUNC(bp);
4243 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244 ETH_MAX_AGGREGATION_QUEUES_E1H;
4245 u16 ring_prod, cqe_ring_prod;
4248 bp->rx_buf_use_size = bp->dev->mtu;
4249 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4250 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4252 if (bp->flags & TPA_ENABLE_FLAG) {
4254 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4255 bp->rx_buf_use_size, bp->rx_buf_size,
4256 bp->dev->mtu + ETH_OVREHEAD);
4258 for_each_queue(bp, j) {
4259 struct bnx2x_fastpath *fp = &bp->fp[j];
4261 for (i = 0; i < max_agg_queues; i++) {
4262 fp->tpa_pool[i].skb =
4263 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4264 if (!fp->tpa_pool[i].skb) {
4265 BNX2X_ERR("Failed to allocate TPA "
4266 "skb pool for queue[%d] - "
4267 "disabling TPA on this "
4269 bnx2x_free_tpa_pool(bp, fp, i);
4270 fp->disable_tpa = 1;
4273 pci_unmap_addr_set((struct sw_rx_bd *)
4274 &fp->tpa_pool[i],
4276 fp->tpa_state[i] = BNX2X_TPA_STOP;
4281 for_each_queue(bp, j) {
4282 struct bnx2x_fastpath *fp = &bp->fp[j];
4285 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4286 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4288 /* "next page" elements initialization */
4290 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4291 struct eth_rx_sge *sge;
4293 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4295 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4296 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4298 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4299 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4302 bnx2x_init_sge_ring_bit_mask(fp);
4305 for (i = 1; i <= NUM_RX_RINGS; i++) {
4306 struct eth_rx_bd *rx_bd;
4308 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4310 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4311 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4313 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4314 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4318 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4319 struct eth_rx_cqe_next_page *nextpg;
4321 nextpg = (struct eth_rx_cqe_next_page *)
4322 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4324 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4325 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4327 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4328 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
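/* The three loops above chain the ring pages together: the descriptors
 * reserved at the end of every BCM_PAGE_SIZE page hold the hi/lo DMA
 * address of the following page, and the modulo arithmetic makes the
 * last page point back to the first, so each ring is a closed circle
 * of NUM_*_RINGS pages.
 */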
4331 /* Allocate SGEs and initialize the ring elements */
4332 for (i = 0, ring_prod = 0;
4333 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4335 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4336 BNX2X_ERR("was only able to allocate "
4338 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4339 /* Cleanup already allocated elements */
4340 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4341 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4342 fp->disable_tpa = 1;
4346 ring_prod = NEXT_SGE_IDX(ring_prod);
4348 fp->rx_sge_prod = ring_prod;
4350 /* Allocate BDs and initialize BD ring */
4351 fp->rx_comp_cons = 0;
4352 cqe_ring_prod = ring_prod = 0;
4353 for (i = 0; i < bp->rx_ring_size; i++) {
4354 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4355 BNX2X_ERR("was only able to allocate "
4357 bp->eth_stats.rx_skb_alloc_failed++;
4360 ring_prod = NEXT_RX_IDX(ring_prod);
4361 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4362 WARN_ON(ring_prod <= i);
4365 fp->rx_bd_prod = ring_prod;
4366 /* must not have more available CQEs than BDs */
4367 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4369 fp->rx_pkt = fp->rx_calls = 0;
4372 * this will generate an interrupt (to the TSTORM),
4373 * so it must only be done after the chip is initialized
4375 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4380 REG_WR(bp, BAR_USTRORM_INTMEM +
4381 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4382 U64_LO(fp->rx_comp_mapping));
4383 REG_WR(bp, BAR_USTRORM_INTMEM +
4384 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4385 U64_HI(fp->rx_comp_mapping));
4389 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4393 for_each_queue(bp, j) {
4394 struct bnx2x_fastpath *fp = &bp->fp[j];
4396 for (i = 1; i <= NUM_TX_RINGS; i++) {
4397 struct eth_tx_bd *tx_bd =
4398 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4401 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4402 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4404 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4405 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4408 fp->tx_pkt_prod = 0;
4409 fp->tx_pkt_cons = 0;
4412 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4417 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4419 int func = BP_FUNC(bp);
4421 spin_lock_init(&bp->spq_lock);
4423 bp->spq_left = MAX_SPQ_PENDING;
4424 bp->spq_prod_idx = 0;
4425 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4426 bp->spq_prod_bd = bp->spq;
4427 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4429 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4430 U64_LO(bp->spq_mapping));
4432 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4433 U64_HI(bp->spq_mapping));
4435 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4439 static void bnx2x_init_context(struct bnx2x *bp)
4443 for_each_queue(bp, i) {
4444 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4445 struct bnx2x_fastpath *fp = &bp->fp[i];
4446 u8 sb_id = FP_SB_ID(fp);
4448 context->xstorm_st_context.tx_bd_page_base_hi =
4449 U64_HI(fp->tx_desc_mapping);
4450 context->xstorm_st_context.tx_bd_page_base_lo =
4451 U64_LO(fp->tx_desc_mapping);
4452 context->xstorm_st_context.db_data_addr_hi =
4453 U64_HI(fp->tx_prods_mapping);
4454 context->xstorm_st_context.db_data_addr_lo =
4455 U64_LO(fp->tx_prods_mapping);
4456 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4457 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4459 context->ustorm_st_context.common.sb_index_numbers =
4460 BNX2X_RX_SB_INDEX_NUM;
4461 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4462 context->ustorm_st_context.common.status_block_id = sb_id;
4463 context->ustorm_st_context.common.flags =
4464 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4465 context->ustorm_st_context.common.mc_alignment_size = 64;
4466 context->ustorm_st_context.common.bd_buff_size =
4467 bp->rx_buf_use_size;
4468 context->ustorm_st_context.common.bd_page_base_hi =
4469 U64_HI(fp->rx_desc_mapping);
4470 context->ustorm_st_context.common.bd_page_base_lo =
4471 U64_LO(fp->rx_desc_mapping);
4472 if (!fp->disable_tpa) {
4473 context->ustorm_st_context.common.flags |=
4474 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4475 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4476 context->ustorm_st_context.common.sge_buff_size =
4477 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4478 context->ustorm_st_context.common.sge_page_base_hi =
4479 U64_HI(fp->rx_sge_mapping);
4480 context->ustorm_st_context.common.sge_page_base_lo =
4481 U64_LO(fp->rx_sge_mapping);
4484 context->cstorm_st_context.sb_index_number =
4485 C_SB_ETH_TX_CQ_INDEX;
4486 context->cstorm_st_context.status_block_id = sb_id;
4488 context->xstorm_ag_context.cdu_reserved =
4489 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4490 CDU_REGION_NUMBER_XCM_AG,
4491 ETH_CONNECTION_TYPE);
4492 context->ustorm_ag_context.cdu_usage =
4493 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4494 CDU_REGION_NUMBER_UCM_AG,
4495 ETH_CONNECTION_TYPE);
4499 static void bnx2x_init_ind_table(struct bnx2x *bp)
4501 int port = BP_PORT(bp);
4507 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4508 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4509 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4510 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4511 i % bp->num_queues);
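/* The indirection table is filled round-robin, so with e.g. 4 queues
 * the entries read 0,1,2,3,0,1,2,3,...  The TSTORM indexes this table
 * with bits of the RSS hash result, which under this layout spreads
 * flows evenly across the enabled queues.
 */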
4513 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4516 static void bnx2x_set_client_config(struct bnx2x *bp)
4518 struct tstorm_eth_client_config tstorm_client = {0};
4519 int port = BP_PORT(bp);
4522 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4523 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4524 tstorm_client.config_flags =
4525 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4527 if (bp->rx_mode && bp->vlgrp) {
4528 tstorm_client.config_flags |=
4529 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4530 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4534 if (bp->flags & TPA_ENABLE_FLAG) {
4535 tstorm_client.max_sges_for_packet =
4536 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4537 tstorm_client.max_sges_for_packet =
4538 ((tstorm_client.max_sges_for_packet +
4539 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4540 PAGES_PER_SGE_SHIFT;
4542 tstorm_client.config_flags |=
4543 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
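/* Illustrative arithmetic for the round-up above, assuming a 4K
 * BCM_PAGE_SIZE and PAGES_PER_SGE == 2 (shift of 1): an mtu field of
 * ~9016 page-aligns to 12288, i.e. 3 pages; (3 + 1) & ~1 = 4 pages,
 * and 4 >> 1 = 2 SGEs per aggregated packet.
 */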
4546 for_each_queue(bp, i) {
4547 REG_WR(bp, BAR_TSTRORM_INTMEM +
4548 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4549 ((u32 *)&tstorm_client)[0]);
4550 REG_WR(bp, BAR_TSTRORM_INTMEM +
4551 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4552 ((u32 *)&tstorm_client)[1]);
4555 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4556 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4559 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4561 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4562 int mode = bp->rx_mode;
4563 int mask = (1 << BP_L_ID(bp));
4564 int func = BP_FUNC(bp);
4567 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4570 case BNX2X_RX_MODE_NONE: /* no Rx */
4571 tstorm_mac_filter.ucast_drop_all = mask;
4572 tstorm_mac_filter.mcast_drop_all = mask;
4573 tstorm_mac_filter.bcast_drop_all = mask;
4575 case BNX2X_RX_MODE_NORMAL:
4576 tstorm_mac_filter.bcast_accept_all = mask;
4578 case BNX2X_RX_MODE_ALLMULTI:
4579 tstorm_mac_filter.mcast_accept_all = mask;
4580 tstorm_mac_filter.bcast_accept_all = mask;
4582 case BNX2X_RX_MODE_PROMISC:
4583 tstorm_mac_filter.ucast_accept_all = mask;
4584 tstorm_mac_filter.mcast_accept_all = mask;
4585 tstorm_mac_filter.bcast_accept_all = mask;
4588 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4592 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4593 REG_WR(bp, BAR_TSTRORM_INTMEM +
4594 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4595 ((u32 *)&tstorm_mac_filter)[i]);
4597 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4598 ((u32 *)&tstorm_mac_filter)[i]); */
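/* mask carries a single bit, 1 << BP_L_ID(bp): the drop_all/accept_all
 * fields are bitmasks of client IDs, so this function only asserts the
 * bit of its own leading client when building the filter.
 */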
4601 if (mode != BNX2X_RX_MODE_NONE)
4602 bnx2x_set_client_config(bp);
4605 static void bnx2x_init_internal_common(struct bnx2x *bp)
4609 /* Zero this manually as its initialization is
4610 currently missing in the initTool */
4611 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4612 REG_WR(bp, BAR_USTRORM_INTMEM +
4613 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4616 static void bnx2x_init_internal_port(struct bnx2x *bp)
4618 int port = BP_PORT(bp);
4620 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4626 static void bnx2x_init_internal_func(struct bnx2x *bp)
4628 struct tstorm_eth_function_common_config tstorm_config = {0};
4629 struct stats_indication_flags stats_flags = {0};
4630 int port = BP_PORT(bp);
4631 int func = BP_FUNC(bp);
4636 tstorm_config.config_flags = MULTI_FLAGS;
4637 tstorm_config.rss_result_mask = MULTI_MASK;
4640 tstorm_config.leading_client_id = BP_L_ID(bp);
4642 REG_WR(bp, BAR_TSTRORM_INTMEM +
4643 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4644 (*(u32 *)&tstorm_config));
4646 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4647 bnx2x_set_storm_rx_mode(bp);
4649 /* reset xstorm per client statistics */
4650 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4651 REG_WR(bp, BAR_XSTRORM_INTMEM +
4652 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4655 /* reset tstorm per client statistics */
4656 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4657 REG_WR(bp, BAR_TSTRORM_INTMEM +
4658 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4662 /* Init statistics related context */
4663 stats_flags.collect_eth = 1;
4665 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4666 ((u32 *)&stats_flags)[0]);
4667 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4668 ((u32 *)&stats_flags)[1]);
4670 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4671 ((u32 *)&stats_flags)[0]);
4672 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4673 ((u32 *)&stats_flags)[1]);
4675 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4676 ((u32 *)&stats_flags)[0]);
4677 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4678 ((u32 *)&stats_flags)[1]);
4680 REG_WR(bp, BAR_XSTRORM_INTMEM +
4681 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4682 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4683 REG_WR(bp, BAR_XSTRORM_INTMEM +
4684 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4685 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4687 REG_WR(bp, BAR_TSTRORM_INTMEM +
4688 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4689 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4690 REG_WR(bp, BAR_TSTRORM_INTMEM +
4691 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4692 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4694 if (CHIP_IS_E1H(bp)) {
4695 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4697 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4699 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4701 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4704 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4708 /* Init CQ ring mapping and aggregation size */
4709 max_agg_size = min((u32)(bp->rx_buf_use_size +
4710 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4712 for_each_queue(bp, i) {
4713 struct bnx2x_fastpath *fp = &bp->fp[i];
4715 REG_WR(bp, BAR_USTRORM_INTMEM +
4716 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4717 U64_LO(fp->rx_comp_mapping));
4718 REG_WR(bp, BAR_USTRORM_INTMEM +
4719 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4720 U64_HI(fp->rx_comp_mapping));
4722 REG_WR16(bp, BAR_USTRORM_INTMEM +
4723 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4728 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4730 switch (load_code) {
4731 case FW_MSG_CODE_DRV_LOAD_COMMON:
4732 bnx2x_init_internal_common(bp);
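/* no break: COMMON init deliberately falls through to PORT init */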
4735 case FW_MSG_CODE_DRV_LOAD_PORT:
4736 bnx2x_init_internal_port(bp);
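/* no break: PORT init deliberately falls through to FUNCTION init */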
4739 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4740 bnx2x_init_internal_func(bp);
4744 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4749 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4753 for_each_queue(bp, i) {
4754 struct bnx2x_fastpath *fp = &bp->fp[i];
4757 fp->state = BNX2X_FP_STATE_CLOSED;
4759 fp->cl_id = BP_L_ID(bp) + i;
4760 fp->sb_id = fp->cl_id;
4762 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4763 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4764 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4766 bnx2x_update_fpsb_idx(fp);
4769 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4771 bnx2x_update_dsb_idx(bp);
4772 bnx2x_update_coalesce(bp);
4773 bnx2x_init_rx_rings(bp);
4774 bnx2x_init_tx_ring(bp);
4775 bnx2x_init_sp_ring(bp);
4776 bnx2x_init_context(bp);
4777 bnx2x_init_internal(bp, load_code);
4778 bnx2x_init_ind_table(bp);
4779 bnx2x_int_enable(bp);
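/* The order above matters: status blocks and coalescing are set up
 * first, then the rings and contexts that reference them, then the
 * storm-internal memory and the indirection table; interrupts are
 * enabled only at the very end, once everything they may fire against
 * exists.
 */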
4782 /* end of nic init */
4785 * gzip service functions
4788 static int bnx2x_gunzip_init(struct bnx2x *bp)
4790 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4791 &bp->gunzip_mapping);
4792 if (bp->gunzip_buf == NULL)
4795 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4796 if (bp->strm == NULL)
4799 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4801 if (bp->strm->workspace == NULL)
4811 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4812 bp->gunzip_mapping);
4813 bp->gunzip_buf = NULL;
4816 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4817 " un-compression\n", bp->dev->name);
4821 static void bnx2x_gunzip_end(struct bnx2x *bp)
4823 kfree(bp->strm->workspace);
4828 if (bp->gunzip_buf) {
4829 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4830 bp->gunzip_mapping);
4831 bp->gunzip_buf = NULL;
4835 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4839 /* check gzip header */
4840 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4847 if (zbuf[3] & FNAME)
4848 while ((zbuf[n++] != 0) && (n < len));
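/* A gzip member starts with a 10-byte fixed header (magic 0x1f 0x8b,
 * compression method, flags, mtime, xfl, os); when the FNAME flag is
 * set, a NUL-terminated file name follows, skipped by the loop above.
 * The header must be stripped by hand because inflateInit2() is called
 * with -MAX_WBITS below, i.e. zlib is handed a raw deflate stream.
 */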
4850 bp->strm->next_in = zbuf + n;
4851 bp->strm->avail_in = len - n;
4852 bp->strm->next_out = bp->gunzip_buf;
4853 bp->strm->avail_out = FW_BUF_SIZE;
4855 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4859 rc = zlib_inflate(bp->strm, Z_FINISH);
4860 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4861 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4862 bp->dev->name, bp->strm->msg);
4864 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4865 if (bp->gunzip_outlen & 0x3)
4866 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4867 " gunzip_outlen (%d) not aligned\n",
4868 bp->dev->name, bp->gunzip_outlen);
4869 bp->gunzip_outlen >>= 2;
4871 zlib_inflateEnd(bp->strm);
4873 if (rc == Z_STREAM_END)
4879 /* nic load/unload */
4882 * General service functions
4885 /* send a NIG loopback debug packet */
4886 static void bnx2x_lb_pckt(struct bnx2x *bp)
4890 /* Ethernet source and destination addresses */
4891 wb_write[0] = 0x55555555;
4892 wb_write[1] = 0x55555555;
4893 wb_write[2] = 0x20; /* SOP */
4894 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4896 /* NON-IP protocol */
4897 wb_write[0] = 0x09000000;
4898 wb_write[1] = 0x55555555;
4899 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4900 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
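/* Each write above carries two data dwords plus a control dword
 * (wb_write[2]) whose SOP/EOP bits delimit the frame, so the two DMAE
 * writes inject one complete 0x10-byte packet into the NIG loopback.
 */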
4903 /* some of the internal memories
4904 * are not directly readable from the driver,
4905 * so to test them we send debug packets
4907 static int bnx2x_int_mem_test(struct bnx2x *bp)
4913 if (CHIP_REV_IS_FPGA(bp))
4915 else if (CHIP_REV_IS_EMUL(bp))
4920 DP(NETIF_MSG_HW, "start part1\n");
4922 /* Disable inputs of parser neighbor blocks */
4923 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4924 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4925 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4926 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4928 /* Write 0 to parser credits for CFC search request */
4929 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4931 /* send Ethernet packet */
4934 /* TODO: do we need to reset the NIG statistics? */
4935 /* Wait until NIG register shows 1 packet of size 0x10 */
4936 count = 1000 * factor;
4939 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4940 val = *bnx2x_sp(bp, wb_data[0]);
4948 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4952 /* Wait until PRS register shows 1 packet */
4953 count = 1000 * factor;
4955 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4963 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4967 /* Reset and init BRB, PRS */
4968 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4970 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4972 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4973 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4975 DP(NETIF_MSG_HW, "part2\n");
4977 /* Disable inputs of parser neighbor blocks */
4978 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4979 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4980 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4981 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4983 /* Write 0 to parser credits for CFC search request */
4984 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4986 /* send 10 Ethernet packets */
4987 for (i = 0; i < 10; i++)
4990 /* Wait until NIG register shows 10 + 1
4991 packets of size 11*0x10 = 0xb0 */
4992 count = 1000 * factor;
4995 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996 val = *bnx2x_sp(bp, wb_data[0]);
5004 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5008 /* Wait until PRS register shows 2 packets */
5009 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5011 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5013 /* Write 1 to parser credits for CFC search request */
5014 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5016 /* Wait until PRS register shows 3 packets */
5017 msleep(10 * factor);
5018 /* Wait until NIG register shows 1 packet of size 0x10 */
5019 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5021 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5023 /* clear NIG EOP FIFO */
5024 for (i = 0; i < 11; i++)
5025 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5026 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5028 BNX2X_ERR("clear of NIG failed\n");
5032 /* Reset and init BRB, PRS, NIG */
5033 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5035 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5037 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5038 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5041 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5044 /* Enable inputs of parser neighbor blocks */
5045 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5046 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5047 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5048 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5050 DP(NETIF_MSG_HW, "done\n");
5055 static void enable_blocks_attention(struct bnx2x *bp)
5057 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5058 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5059 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5060 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5061 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5062 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5063 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5064 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5065 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5066 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5067 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5068 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5069 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5070 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5071 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5072 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5073 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5074 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5075 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5076 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5077 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5078 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5079 if (CHIP_REV_IS_FPGA(bp))
5080 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5082 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5083 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5084 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5085 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5086 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5087 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5088 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5089 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5090 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5091 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5095 static int bnx2x_init_common(struct bnx2x *bp)
5099 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5101 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5102 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5104 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5105 if (CHIP_IS_E1H(bp))
5106 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5108 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5110 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5112 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5113 if (CHIP_IS_E1(bp)) {
5114 /* enable HW interrupt from PXP on USDM overflow
5115 bit 16 on INT_MASK_0 */
5116 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5119 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5123 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5124 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5125 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5126 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5127 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5128 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5130 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5131 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5132 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5133 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5134 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5137 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5139 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5140 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5141 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5144 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5145 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5147 /* let the HW do its magic ... */
5149 /* finish PXP init */
5150 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5152 BNX2X_ERR("PXP2 CFG failed\n");
5155 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5157 BNX2X_ERR("PXP2 RD_INIT failed\n");
5161 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5162 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5164 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5166 /* clean the DMAE memory */
5168 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5170 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5171 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5172 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5173 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5175 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5176 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5177 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5178 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5180 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5181 /* soft reset pulse */
5182 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5183 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5186 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5189 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5190 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5191 if (!CHIP_REV_IS_SLOW(bp)) {
5192 /* enable hw interrupt from doorbell Q */
5193 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5196 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5197 if (CHIP_REV_IS_SLOW(bp)) {
5198 /* fix for emulation and FPGA for no pause */
5199 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5200 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5201 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5202 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5205 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5207 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5208 if (CHIP_IS_E1H(bp))
5209 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5211 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5212 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5213 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5214 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5216 if (CHIP_IS_E1H(bp)) {
5217 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5218 STORM_INTMEM_SIZE_E1H/2);
5220 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221 0, STORM_INTMEM_SIZE_E1H/2);
5222 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5223 STORM_INTMEM_SIZE_E1H/2);
5225 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226 0, STORM_INTMEM_SIZE_E1H/2);
5227 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5228 STORM_INTMEM_SIZE_E1H/2);
5230 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231 0, STORM_INTMEM_SIZE_E1H/2);
5232 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5233 STORM_INTMEM_SIZE_E1H/2);
5235 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5236 0, STORM_INTMEM_SIZE_E1H/2);
5238 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5239 STORM_INTMEM_SIZE_E1);
5240 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5241 STORM_INTMEM_SIZE_E1);
5242 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5243 STORM_INTMEM_SIZE_E1);
5244 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1);
5248 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5249 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5250 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5251 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5254 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5256 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5259 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5260 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5261 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5263 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5264 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5265 REG_WR(bp, i, 0xc0cac01a);
5266 /* TODO: replace with something meaningful */
5268 if (CHIP_IS_E1H(bp))
5269 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5270 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5272 if (sizeof(union cdu_context) != 1024)
5273 /* we currently assume that a context is 1024 bytes */
5274 printk(KERN_ALERT PFX "please adjust the size of"
5275 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5277 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5278 val = (4 << 24) + (0 << 12) + 1024;
5279 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5280 if (CHIP_IS_E1(bp)) {
5281 /* !!! fix pxp client credit until excel update */
5282 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5283 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5286 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5287 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5289 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5290 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5292 /* PXPCS COMMON comes here */
5293 /* Reset PCIE errors for debug */
5294 REG_WR(bp, 0x2814, 0xffffffff);
5295 REG_WR(bp, 0x3820, 0xffffffff);
5297 /* EMAC0 COMMON comes here */
5298 /* EMAC1 COMMON comes here */
5299 /* DBU COMMON comes here */
5300 /* DBG COMMON comes here */
5302 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5303 if (CHIP_IS_E1H(bp)) {
5304 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5305 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5308 if (CHIP_REV_IS_SLOW(bp))
5311 /* finish CFC init */
5312 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5314 BNX2X_ERR("CFC LL_INIT failed\n");
5317 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5319 BNX2X_ERR("CFC AC_INIT failed\n");
5322 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5324 BNX2X_ERR("CFC CAM_INIT failed\n");
5327 REG_WR(bp, CFC_REG_DEBUG0, 0);
5329 /* read NIG statistic
5330 to see if this is our first up since powerup */
5331 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5332 val = *bnx2x_sp(bp, wb_data[0]);
5334 /* do internal memory self test */
5335 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5336 BNX2X_ERR("internal mem self test failed\n");
5340 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5341 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5342 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5343 /* Fan failure is indicated by SPIO 5 */
5344 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5345 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5347 /* set to active low mode */
5348 val = REG_RD(bp, MISC_REG_SPIO_INT);
5349 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5350 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5351 REG_WR(bp, MISC_REG_SPIO_INT, val);
5353 /* enable interrupt to signal the IGU */
5354 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5355 val |= (1 << MISC_REGISTERS_SPIO_5);
5356 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5363 /* clear PXP2 attentions */
5364 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5366 enable_blocks_attention(bp);
5368 if (bp->flags & TPA_ENABLE_FLAG) {
5369 struct tstorm_eth_tpa_exist tmp = {0};
5373 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5375 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5379 if (!BP_NOMCP(bp)) {
5380 bnx2x_acquire_phy_lock(bp);
5381 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5382 bnx2x_release_phy_lock(bp);
5384 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5389 static int bnx2x_init_port(struct bnx2x *bp)
5391 int port = BP_PORT(bp);
5394 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5396 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5398 /* Port PXP comes here */
5399 /* Port PXP2 comes here */
5404 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5405 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5406 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5407 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5412 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5413 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5414 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5415 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5420 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5421 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5422 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5423 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5425 /* Port CMs come here */
5427 /* Port QM comes here */
5429 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5430 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5432 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5433 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5435 /* Port DQ comes here */
5436 /* Port BRB1 comes here */
5437 /* Port PRS comes here */
5438 /* Port TSDM comes here */
5439 /* Port CSDM comes here */
5440 /* Port USDM comes here */
5441 /* Port XSDM comes here */
5442 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5443 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5444 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5445 port ? USEM_PORT1_END : USEM_PORT0_END);
5446 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5447 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5448 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5449 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5450 /* Port UPB comes here */
5451 /* Port XPB comes here */
5453 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5454 port ? PBF_PORT1_END : PBF_PORT0_END);
5456 /* configure PBF to work without PAUSE mtu 9000 */
5457 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5459 /* update threshold */
5460 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5461 /* update init credit */
5462 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5465 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5467 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5470 /* tell the searcher where the T2 table is */
5471 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5473 wb_write[0] = U64_LO(bp->t2_mapping);
5474 wb_write[1] = U64_HI(bp->t2_mapping);
5475 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5476 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5477 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5478 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5480 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5481 /* Port SRCH comes here */
5483 /* Port CDU comes here */
5484 /* Port CFC comes here */
5486 if (CHIP_IS_E1(bp)) {
5487 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5488 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5490 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5491 port ? HC_PORT1_END : HC_PORT0_END);
5493 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5494 MISC_AEU_PORT0_START,
5495 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5496 /* init aeu_mask_attn_func_0/1:
5497 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5498 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5499 * bits 4-7 are used for "per vn group attention" */
5500 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5501 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5503 /* Port PXPCS comes here */
5504 /* Port EMAC0 comes here */
5505 /* Port EMAC1 comes here */
5506 /* Port DBU comes here */
5507 /* Port DBG comes here */
5508 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5509 port ? NIG_PORT1_END : NIG_PORT0_END);
5511 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5513 if (CHIP_IS_E1H(bp)) {
5515 struct cmng_struct_per_port m_cmng_port;
5518 /* 0x2 disable e1hov, 0x1 enable */
5519 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5520 (IS_E1HMF(bp) ? 0x1 : 0x2));
5522 /* Init RATE SHAPING and FAIRNESS contexts.
5523 Initialize as if there is a 10G link. */
5524 wsum = bnx2x_calc_vn_wsum(bp);
5525 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5527 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5528 bnx2x_init_vn_minmax(bp, 2*vn + port,
5529 wsum, 10000, &m_cmng_port);
5532 /* Port MCP comes here */
5533 /* Port DMAE comes here */
5535 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5536 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5537 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5538 /* add SPIO 5 to group 0 */
5539 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5540 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5541 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5548 bnx2x__link_reset(bp);
5553 #define ILT_PER_FUNC (768/2)
5554 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5555 /* the phys address is shifted right 12 bits and has a
5556 1=valid bit added as the 53rd bit
5557 then since this is a wide register(TM)
5558 we split it into two 32 bit writes
5560 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5561 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5562 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5563 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
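/* Illustrative example of the encoding: for a DMA address of
 * 0x12345678000, ONCHIP_ADDR1 gives (addr >> 12) & 0xffffffff =
 * 0x12345678 and ONCHIP_ADDR2 gives (1 << 20) | (addr >> 44) =
 * 0x100000 - the valid bit at bit 20 of the high dword (the "53rd bit"
 * of the wide register), with the upper address bits (zero here) below
 * it.  Likewise PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405, a one-line
 * first == last ILT range.
 */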
5565 #define CNIC_ILT_LINES 0
5567 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5571 if (CHIP_IS_E1H(bp))
5572 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5574 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5576 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5579 static int bnx2x_init_func(struct bnx2x *bp)
5581 int port = BP_PORT(bp);
5582 int func = BP_FUNC(bp);
5585 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5587 i = FUNC_ILT_BASE(func);
5589 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5590 if (CHIP_IS_E1H(bp)) {
5591 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5592 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5594 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5595 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5598 if (CHIP_IS_E1H(bp)) {
5599 for (i = 0; i < 9; i++)
5600 bnx2x_init_block(bp,
5601 cm_start[func][i], cm_end[func][i]);
5603 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5604 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5607 /* HC init per function */
5608 if (CHIP_IS_E1H(bp)) {
5609 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5611 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5612 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5614 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5616 if (CHIP_IS_E1H(bp))
5617 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5619 /* Reset PCIE errors for debug */
5620 REG_WR(bp, 0x2114, 0xffffffff);
5621 REG_WR(bp, 0x2120, 0xffffffff);
5626 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5630 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5631 BP_FUNC(bp), load_code);
5634 mutex_init(&bp->dmae_mutex);
5635 bnx2x_gunzip_init(bp);
5637 switch (load_code) {
5638 case FW_MSG_CODE_DRV_LOAD_COMMON:
5639 rc = bnx2x_init_common(bp);
5644 case FW_MSG_CODE_DRV_LOAD_PORT:
5646 rc = bnx2x_init_port(bp);
5651 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5653 rc = bnx2x_init_func(bp);
5659 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5663 if (!BP_NOMCP(bp)) {
5664 int func = BP_FUNC(bp);
5666 bp->fw_drv_pulse_wr_seq =
5667 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5668 DRV_PULSE_SEQ_MASK);
5669 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5670 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5671 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5675 /* this needs to be done before gunzip end */
5676 bnx2x_zero_def_sb(bp);
5677 for_each_queue(bp, i)
5678 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5681 bnx2x_gunzip_end(bp);
5686 /* send the MCP a request, block until there is a reply */
5687 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5689 int func = BP_FUNC(bp);
5690 u32 seq = ++bp->fw_seq;
5693 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5695 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5696 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5699 /* let the FW do its magic ... */
5702 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5704 /* Give the FW up to 2 seconds (200*10ms) */
5705 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5707 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5708 cnt*delay, rc, seq);
5710 /* is this a reply to our command? */
5711 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5712 rc &= FW_MSG_CODE_MASK;
5716 BNX2X_ERR("FW failed to respond!\n");
5724 static void bnx2x_free_mem(struct bnx2x *bp)
5727 #define BNX2X_PCI_FREE(x, y, size) \
5730 pci_free_consistent(bp->pdev, size, x, y); \
5736 #define BNX2X_FREE(x) \
5747 for_each_queue(bp, i) {
5750 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5751 bnx2x_fp(bp, i, status_blk_mapping),
5752 sizeof(struct host_status_block) +
5753 sizeof(struct eth_tx_db_data));
5755 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5756 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5757 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5758 bnx2x_fp(bp, i, tx_desc_mapping),
5759 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5761 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5762 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5763 bnx2x_fp(bp, i, rx_desc_mapping),
5764 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5766 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5767 bnx2x_fp(bp, i, rx_comp_mapping),
5768 sizeof(struct eth_fast_path_rx_cqe) *
5772 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5773 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5774 bnx2x_fp(bp, i, rx_sge_mapping),
5775 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5777 /* end of fastpath */
5779 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5780 sizeof(struct host_def_status_block));
5782 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5783 sizeof(struct bnx2x_slowpath));
5786 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5787 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5788 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5789 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5791 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5793 #undef BNX2X_PCI_FREE
5797 static int bnx2x_alloc_mem(struct bnx2x *bp)
5800 #define BNX2X_PCI_ALLOC(x, y, size) \
5802 x = pci_alloc_consistent(bp->pdev, size, y); \
5804 goto alloc_mem_err; \
5805 memset(x, 0, size); \
5808 #define BNX2X_ALLOC(x, size) \
5810 x = vmalloc(size); \
5812 goto alloc_mem_err; \
5813 memset(x, 0, size); \
5819 for_each_queue(bp, i) {
5820 bnx2x_fp(bp, i, bp) = bp;
5823 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5824 &bnx2x_fp(bp, i, status_blk_mapping),
5825 sizeof(struct host_status_block) +
5826 sizeof(struct eth_tx_db_data));
5828 bnx2x_fp(bp, i, hw_tx_prods) =
5829 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5831 bnx2x_fp(bp, i, tx_prods_mapping) =
5832 bnx2x_fp(bp, i, status_blk_mapping) +
5833 sizeof(struct host_status_block);
5835 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5836 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5837 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5838 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5839 &bnx2x_fp(bp, i, tx_desc_mapping),
5840 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5842 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5843 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5844 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5845 &bnx2x_fp(bp, i, rx_desc_mapping),
5846 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5848 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5849 &bnx2x_fp(bp, i, rx_comp_mapping),
5850 sizeof(struct eth_fast_path_rx_cqe) *
5854 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5855 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5856 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5857 &bnx2x_fp(bp, i, rx_sge_mapping),
5858 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5860 /* end of fastpath */
5862 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5863 sizeof(struct host_def_status_block));
5865 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5866 sizeof(struct bnx2x_slowpath));
5869 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5872 for (i = 0; i < 64*1024; i += 64) {
5873 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5874 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5877 /* allocate searcher T2 table
5878 we allocate 1/4 of alloc num for T2
5879 (which is not entered into the ILT) */
5880 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5883 for (i = 0; i < 16*1024; i += 64)
5884 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5886 /* now fixup the last line in the block to point to the next block */
5887 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
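/* The searcher consumes T2 as a free list: bytes 56..63 of each 64-byte
 * element hold the physical address of the next element, and the fixup
 * above makes the last element wrap back to the base, closing the
 * chain.
 */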
5889 /* Timer block array (MAX_CONN*8), phys uncached, for now 1024 conns */
5890 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5892 /* QM queues (128*MAX_CONN) */
5893 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5896 /* Slow path ring */
5897 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
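/* Two allocator flavors are used above: BNX2X_PCI_ALLOC hands out
 * zeroed DMA-coherent memory for structures the chip reads and writes,
 * while BNX2X_ALLOC vmalloc()s plain host memory for driver-only shadow
 * arrays (sw_tx_bd/sw_rx_bd rings and the like).
 */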
5905 #undef BNX2X_PCI_ALLOC
5909 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5913 for_each_queue(bp, i) {
5914 struct bnx2x_fastpath *fp = &bp->fp[i];
5916 u16 bd_cons = fp->tx_bd_cons;
5917 u16 sw_prod = fp->tx_pkt_prod;
5918 u16 sw_cons = fp->tx_pkt_cons;
5920 while (sw_cons != sw_prod) {
5921 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5927 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5931 for_each_queue(bp, j) {
5932 struct bnx2x_fastpath *fp = &bp->fp[j];
5934 for (i = 0; i < NUM_RX_BD; i++) {
5935 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5936 struct sk_buff *skb = rx_buf->skb;
5941 pci_unmap_single(bp->pdev,
5942 pci_unmap_addr(rx_buf, mapping),
5943 bp->rx_buf_use_size,
5944 PCI_DMA_FROMDEVICE);
5949 if (!fp->disable_tpa)
5950 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5951 ETH_MAX_AGGREGATION_QUEUES_E1 :
5952 ETH_MAX_AGGREGATION_QUEUES_E1H);
5956 static void bnx2x_free_skbs(struct bnx2x *bp)
5958 bnx2x_free_tx_skbs(bp);
5959 bnx2x_free_rx_skbs(bp);
5962 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5966 free_irq(bp->msix_table[0].vector, bp->dev);
5967 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5968 bp->msix_table[0].vector);
5970 for_each_queue(bp, i) {
5971 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5972 "state %x\n", i, bp->msix_table[i + offset].vector,
5973 bnx2x_fp(bp, i, state));
5975 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5976 BNX2X_ERR("IRQ of fp #%d being freed while "
5977 "state != closed\n", i);
5979 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5983 static void bnx2x_free_irq(struct bnx2x *bp)
5985 if (bp->flags & USING_MSIX_FLAG) {
5986 bnx2x_free_msix_irqs(bp);
5987 pci_disable_msix(bp->pdev);
5988 bp->flags &= ~USING_MSIX_FLAG;
5991 free_irq(bp->pdev->irq, bp->dev);
5994 static int bnx2x_enable_msix(struct bnx2x *bp)
5998 bp->msix_table[0].entry = 0;
6000 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6002 for_each_queue(bp, i) {
6003 int igu_vec = offset + i + BP_L_ID(bp);
6005 bp->msix_table[i + offset].entry = igu_vec;
6006 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6007 "(fastpath #%u)\n", i + offset, igu_vec, i);
6010 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6011 bp->num_queues + offset);
6013 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6016 bp->flags |= USING_MSIX_FLAG;
6021 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6023 int i, rc, offset = 1;
6025 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6026 bp->dev->name, bp->dev);
6028 BNX2X_ERR("request sp irq failed\n");
6032 for_each_queue(bp, i) {
6033 rc = request_irq(bp->msix_table[i + offset].vector,
6034 bnx2x_msix_fp_int, 0,
6035 bp->dev->name, &bp->fp[i]);
6037 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6039 bnx2x_free_msix_irqs(bp);
6043 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6049 static int bnx2x_req_irq(struct bnx2x *bp)
6053 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6054 bp->dev->name, bp->dev);
6056 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6062 * Init service functions
6065 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6067 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6068 int port = BP_PORT(bp);
6071 * unicasts 0-31:port0 32-63:port1
6072 * multicast 64-127:port0 128-191:port1
6074 config->hdr.length_6b = 2;
6075 config->hdr.offset = port ? 31 : 0;
6076 config->hdr.client_id = BP_CL_ID(bp);
6077 config->hdr.reserved1 = 0;
6080 config->config_table[0].cam_entry.msb_mac_addr =
6081 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6082 config->config_table[0].cam_entry.middle_mac_addr =
6083 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6084 config->config_table[0].cam_entry.lsb_mac_addr =
6085 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6086 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6088 config->config_table[0].target_table_entry.flags = 0;
6090 CAM_INVALIDATE(config->config_table[0]);
6091 config->config_table[0].target_table_entry.client_id = 0;
6092 config->config_table[0].target_table_entry.vlan_id = 0;
6094 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6095 (set ? "setting" : "clearing"),
6096 config->config_table[0].cam_entry.msb_mac_addr,
6097 config->config_table[0].cam_entry.middle_mac_addr,
6098 config->config_table[0].cam_entry.lsb_mac_addr);
6101 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6102 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6103 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6104 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6106 config->config_table[1].target_table_entry.flags =
6107 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6109 CAM_INVALIDATE(config->config_table[1]);
6110 config->config_table[1].target_table_entry.client_id = 0;
6111 config->config_table[1].target_table_entry.vlan_id = 0;
6113 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6114 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6115 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6118 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6120 struct mac_configuration_cmd_e1h *config =
6121 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6123 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6124 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6128 /* CAM allocation for E1H
6129 * unicasts: by func number
6130 * multicast: 20+FUNC*20, 20 each
6132 config->hdr.length_6b = 1;
6133 config->hdr.offset = BP_FUNC(bp);
6134 config->hdr.client_id = BP_CL_ID(bp);
6135 config->hdr.reserved1 = 0;
6138 config->config_table[0].msb_mac_addr =
6139 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6140 config->config_table[0].middle_mac_addr =
6141 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6142 config->config_table[0].lsb_mac_addr =
6143 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6144 config->config_table[0].client_id = BP_L_ID(bp);
6145 config->config_table[0].vlan_id = 0;
6146 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6148 config->config_table[0].flags = BP_PORT(bp);
6150 config->config_table[0].flags =
6151 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6153 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6154 (set ? "setting" : "clearing"),
6155 config->config_table[0].msb_mac_addr,
6156 config->config_table[0].middle_mac_addr,
6157 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6159 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6160 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6161 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6164 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6165 int *state_p, int poll)
6167 /* can take a while if any port is running */
6170 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6171 poll ? "polling" : "waiting", state, idx);
6176 bnx2x_rx_int(bp->fp, 10);
6177 /* if index is different from 0
6178 * the reply for some commands will
6179 * be on the non-default queue
6182 bnx2x_rx_int(&bp->fp[idx], 10);
6185 mb(); /* state is changed by bnx2x_sp_event() */
6186 if (*state_p == state)
6193 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6194 poll ? "polling" : "waiting", state, idx);
6195 #ifdef BNX2X_STOP_ON_ERROR
6202 static int bnx2x_setup_leading(struct bnx2x *bp)
6206 /* reset IGU state */
6207 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6210 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6212 /* Wait for completion */
6213 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6218 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6220 /* reset IGU state */
6221 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6224 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6225 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6227 /* Wait for completion */
6228 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6229 &(bp->fp[index].state), 0);
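/* Ramrod flow for both setup functions above: the command is posted on
 * the slow-path queue, the firmware completes it on the fast-path ring,
 * bnx2x_sp_event() advances the state machine, and bnx2x_wait_ramrod()
 * polls for the expected state.
 */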
6232 static int bnx2x_poll(struct napi_struct *napi, int budget);
6233 static void bnx2x_set_rx_mode(struct net_device *dev);
6235 /* must be called with rtnl_lock */
6236 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6240 #ifdef BNX2X_STOP_ON_ERROR
6241 if (unlikely(bp->panic))
6245 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6247 /* Send LOAD_REQUEST command to MCP
6248 Returns the type of LOAD command:
6249 if it is the first port to be initialized
6250 common blocks should be initialized, otherwise - not
6252 if (!BP_NOMCP(bp)) {
6253 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6255 BNX2X_ERR("MCP response failure, aborting\n");
6258 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6259 return -EBUSY; /* other port in diagnostic mode */
6262 int port = BP_PORT(bp);
6264 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6265 load_count[0], load_count[1], load_count[2]);
6267 load_count[1 + port]++;
6268 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6269 load_count[0], load_count[1], load_count[2]);
6270 if (load_count[0] == 1)
6271 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6272 else if (load_count[1 + port] == 1)
6273 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6275 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6278 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6279 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6283 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6285 /* if we can't use MSI-X we only need one fp,
6286 * so try to enable MSI-X with the requested number of fp's
6287 * and fall back to INT#A with one fp
6293 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6294 /* user requested number */
6295 bp->num_queues = use_multi;
6298 bp->num_queues = min_t(u32, num_online_cpus(),
6303 if (bnx2x_enable_msix(bp)) {
6304 /* failed to enable MSI-X */
6307 BNX2X_ERR("Multi requested but failed"
6308 " to enable MSI-X\n");
6312 "set number of queues to %d\n", bp->num_queues);
6314 if (bnx2x_alloc_mem(bp))
6317 for_each_queue(bp, i)
6318 bnx2x_fp(bp, i, disable_tpa) =
6319 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6321 if (bp->flags & USING_MSIX_FLAG) {
6322 rc = bnx2x_req_msix_irqs(bp);
6324 pci_disable_msix(bp->pdev);
6329 rc = bnx2x_req_irq(bp);
6331 BNX2X_ERR("IRQ request failed, aborting\n");
6336 for_each_queue(bp, i)
6337 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6341 rc = bnx2x_init_hw(bp, load_code);
6343 BNX2X_ERR("HW init failed, aborting\n");
6347 /* Setup NIC internals and enable interrupts */
6348 bnx2x_nic_init(bp, load_code);
6350 /* Send LOAD_DONE command to MCP */
6351 if (!BP_NOMCP(bp)) {
6352 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6354 BNX2X_ERR("MCP response failure, aborting\n");
6356 goto load_int_disable;
6360 bnx2x_stats_init(bp);
6362 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6364 /* Enable Rx interrupt handling before sending the ramrod
6365 as it's completed on Rx FP queue */
6366 for_each_queue(bp, i)
6367 napi_enable(&bnx2x_fp(bp, i, napi));
6369 /* Enable interrupt handling */
6370 atomic_set(&bp->intr_sem, 0);
6372 rc = bnx2x_setup_leading(bp);
6374 BNX2X_ERR("Setup leading failed!\n");
6375 goto load_stop_netif;
6378 if (CHIP_IS_E1H(bp))
6379 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6380 BNX2X_ERR("!!! mf_cfg function disabled\n");
6381 bp->state = BNX2X_STATE_DISABLED;
6384 if (bp->state == BNX2X_STATE_OPEN)
6385 for_each_nondefault_queue(bp, i) {
6386 rc = bnx2x_setup_multi(bp, i);
6388 goto load_stop_netif;
6392 bnx2x_set_mac_addr_e1(bp, 1);
6394 bnx2x_set_mac_addr_e1h(bp, 1);
6397 bnx2x_initial_phy_init(bp);
6399 /* Start fast path */
6400 switch (load_mode) {
6402 /* Tx queue should be only reenabled */
6403 netif_wake_queue(bp->dev);
6404 bnx2x_set_rx_mode(bp->dev);
6408 netif_start_queue(bp->dev);
6409 bnx2x_set_rx_mode(bp->dev);
6410 if (bp->flags & USING_MSIX_FLAG)
6411 printk(KERN_INFO PFX "%s: using MSI-X\n",
6416 bnx2x_set_rx_mode(bp->dev);
6417 bp->state = BNX2X_STATE_DIAG;
6425 bnx2x__link_status_update(bp);
6427 /* start the timer */
6428 mod_timer(&bp->timer, jiffies + bp->current_interval);
6434 for_each_queue(bp, i)
6435 napi_disable(&bnx2x_fp(bp, i, napi));
6438 bnx2x_int_disable_sync(bp);
6443 /* Free SKBs, SGEs, TPA pool and driver internals */
6444 bnx2x_free_skbs(bp);
6445 for_each_queue(bp, i)
6446 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6450 /* TBD we really need to reset the chip
6451 if we want to recover from this */
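
/* Connection teardown mirrors the setup flow: a HALT ramrod stops the
 * client, then a CFC_DEL ramrod releases its context.  Each step is
 * completed synchronously via bnx2x_wait_ramrod().
 */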
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(bp->fp[index].state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block;
	   we are going to reset the chip anyway,
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			break;
		}
		cnt--;
		msleep(1);
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
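
/* The reset_code (from the MCP, or derived from load_count[] when there
 * is no MCP) determines the reset scope on unload: the function only,
 * the whole port, or also the common blocks shared by both ports.
 */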
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Wait until tx fast path tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (BNX2X_HAS_TX_WORK(fp)) {

			if (!netif_running(bp->dev))
				bnx2x_tx_int(fp, 1000);

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}

	/* Give HW time to discard old tx messages */
	msleep(1);

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
	/* Disable interrupts after Tx and Rx are disabled on stack level */
	bnx2x_int_disable_sync(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length_6b = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	/* Close multi and leading connections;
	   completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}
/* end of nic load/unload */

/****************************************************************************
 * Init service functions
 ****************************************************************************/
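
/* A pre-boot UNDI driver may leave the device half-initialized; it is
 * detected by the CID offset it programs for the normal doorbell (0x7).
 * If found, the device is unloaded and reset here before the regular
 * driver init runs.
 */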
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
				    HC_REG_CONFIG_0), 0x1000);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			       NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
			       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			       MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset the device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffff7f);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		}
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
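
/* chip_id is composed from four registers; e.g. a value of 0x164e0001
 * would decode as chip num 0x164e (the 57710 device id), rev 0x0,
 * metal 0x00 and bond_id 0x1 per the bit layout noted below.
 */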
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

	BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
		       bp->common.hw_config, bp->common.board);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn;
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
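
/* Translate the external PHY type configured in NVRAM into the ethtool
 * SUPPORTED_* capability mask of this port; the mask is then trimmed
 * further down according to the NVRAM speed_cap_mask.
 */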
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
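
/* All per-port configuration (serdes/lane/PHY setup, speed capabilities,
 * link defaults and the MAC address) is read from the dev_info section
 * of the shared memory.
 */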
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
	     KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
		       " link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
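
/* One-time per-function SW init during probe: locks, deferred work,
 * HW info discovery and default ring/TPA/timer parameters.
 */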
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : bp->timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/****************************************************************************
 * ethtool service functions
 ****************************************************************************/

/* All ethtool functions called with rtnl_lock */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
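
/* NVRAM dword access protocol: clear the DONE bit, program the address,
 * issue the command and poll until DONE is set again.  The FIRST/LAST
 * bits in cmd_flags bracket a multi-dword burst.
 */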
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
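
/* The eeprom magic 0x00504859 is "PHY" in ASCII ('P' 'H' 'Y'); ethtool -E
 * with this magic routes the buffer to the external PHY flash instead of
 * the NVRAM, which only the PMF is allowed to do.
 */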
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
			    FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
			    FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
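
/* Self-test support follows.  The string table below is indexed by test
 * number and must stay in sync with BNX2X_NUM_TESTS and with the order
 * in which bnx2x_self_test() fills the results buffer.
 */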
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" },
	{ "MC errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value against the expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int i;

	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_int_enable(bp);
			for_each_queue(bp, i)
				napi_enable(&bnx2x_fp(bp, i, napi));
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp)
{
	int i;

	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
		for_each_queue(bp, i)
			napi_disable(&bnx2x_fp(bp, i, napi));
	}
	bnx2x_int_disable_sync(bp);
}
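
/* The loopback test builds a single raw frame (our own MAC as DA, payload
 * byte i set to (i & 0xff)), posts it on queue 0 with one Tx BD and a
 * doorbell, and then verifies that exactly one packet returned on the Rx
 * ring with the expected length and payload.
 */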
8651 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8656 while (bnx2x_link_test(bp) && cnt--)
8660 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8662 unsigned int pkt_size, num_pkts, i;
8663 struct sk_buff *skb;
8664 unsigned char *packet;
8665 struct bnx2x_fastpath *fp = &bp->fp[0];
8666 u16 tx_start_idx, tx_idx;
8667 u16 rx_start_idx, rx_idx;
8669 struct sw_tx_bd *tx_buf;
8670 struct eth_tx_bd *tx_bd;
8672 union eth_rx_cqe *cqe;
8674 struct sw_rx_bd *rx_buf;
8678 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8679 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8680 bnx2x_acquire_phy_lock(bp);
8681 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8682 bnx2x_release_phy_lock(bp);
8684 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8685 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8686 bnx2x_acquire_phy_lock(bp);
8687 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8688 bnx2x_release_phy_lock(bp);
8689 /* wait until link state is restored */
8690 bnx2x_wait_for_link(bp, link_up);
8696 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8699 goto test_loopback_exit;
8701 packet = skb_put(skb, pkt_size);
8702 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8703 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8704 for (i = ETH_HLEN; i < pkt_size; i++)
8705 packet[i] = (unsigned char) (i & 0xff);
8708 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8709 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8711 pkt_prod = fp->tx_pkt_prod++;
8712 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8713 tx_buf->first_bd = fp->tx_bd_prod;
8716 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8717 mapping = pci_map_single(bp->pdev, skb->data,
8718 skb_headlen(skb), PCI_DMA_TODEVICE);
8719 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8720 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8721 tx_bd->nbd = cpu_to_le16(1);
8722 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8723 tx_bd->vlan = cpu_to_le16(pkt_prod);
8724 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8725 ETH_TX_BD_FLAGS_END_BD);
8726 tx_bd->general_data = ((UNICAST_ADDRESS <<
8727 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8729 fp->hw_tx_prods->bds_prod =
8730 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8731 mb(); /* FW restriction: must not reorder writing nbd and packets */
8732 fp->hw_tx_prods->packets_prod =
8733 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8734 DOORBELL(bp, FP_IDX(fp), 0);
8740 bp->dev->trans_start = jiffies;
8744 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8745 if (tx_idx != tx_start_idx + num_pkts)
8746 goto test_loopback_exit;
8748 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8749 if (rx_idx != rx_start_idx + num_pkts)
8750 goto test_loopback_exit;
8752 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8753 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8754 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8755 goto test_loopback_rx_exit;
8757 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8758 if (len != pkt_size)
8759 goto test_loopback_rx_exit;
8761 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8762 skb = rx_buf->skb;
8763 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8764 for (i = ETH_HLEN; i < pkt_size; i++)
8765 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8766 goto test_loopback_rx_exit;
8768 rc = 0;
8770 test_loopback_rx_exit:
8771 bp->dev->last_rx = jiffies;
8773 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8774 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8775 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8776 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8778 /* Update producers */
8779 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8780 fp->rx_sge_prod);
8781 mmiowb(); /* keep prod updates ordered */
8783 test_loopback_exit:
8784 bp->link_params.loopback_mode = LOOPBACK_NONE;
8786 return rc;
8787 }
8789 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8790 {
8791 int rc = 0;
8793 if (!netif_running(bp->dev))
8794 return BNX2X_LOOPBACK_FAILED;
8796 bnx2x_netif_stop(bp);
8798 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8799 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8800 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8801 }
8803 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8804 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8805 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8806 }
8808 bnx2x_netif_start(bp);
8810 return rc;
8811 }
8813 #define CRC32_RESIDUAL 0xdebb20e3
8815 static int bnx2x_test_nvram(struct bnx2x *bp)
8816 {
8817 static const struct {
8818 int offset;
8819 int size;
8820 } nvram_tbl[] = {
8821 { 0, 0x14 }, /* bootstrap */
8822 { 0x14, 0xec }, /* dir */
8823 { 0x100, 0x350 }, /* manuf_info */
8824 { 0x450, 0xf0 }, /* feature_info */
8825 { 0x640, 0x64 }, /* upgrade_key_info */
8826 { 0x6a4, 0x64 }, /* upgrade_key_info */
8827 { 0x708, 0x70 }, /* manuf_key_info */
8828 { 0x778, 0x70 }, /* manuf_key_info */
8829 { 0, 0 }
8830 };
8831 u32 buf[0x350 / 4];
8832 u8 *data = (u8 *)buf;
8833 int i, rc;
8834 u32 magic, csum;
8836 rc = bnx2x_nvram_read(bp, 0, data, 4);
8837 if (rc) {
8838 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8839 goto test_nvram_exit;
8840 }
8842 magic = be32_to_cpu(buf[0]);
8843 if (magic != 0x669955aa) {
8844 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8845 rc = -ENODEV;
8846 goto test_nvram_exit;
8847 }
8849 for (i = 0; nvram_tbl[i].size; i++) {
8851 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8852 nvram_tbl[i].size);
8853 if (rc) {
8854 DP(NETIF_MSG_PROBE,
8855 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8856 goto test_nvram_exit;
8857 }
8859 csum = ether_crc_le(nvram_tbl[i].size, data);
8860 if (csum != CRC32_RESIDUAL) {
8861 DP(NETIF_MSG_PROBE,
8862 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8863 rc = -ENODEV;
8864 goto test_nvram_exit;
8865 }
8866 }
8868 test_nvram_exit:
8869 return rc;
8870 }
8872 static int bnx2x_test_intr(struct bnx2x *bp)
8873 {
8874 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8875 int i, rc;
8877 if (!netif_running(bp->dev))
8878 return -ENODEV;
8880 config->hdr.length_6b = 0;
8881 config->hdr.offset = 0;
8882 config->hdr.client_id = BP_CL_ID(bp);
8883 config->hdr.reserved1 = 0;
8885 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8886 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8887 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8888 if (rc == 0) {
8889 bp->set_mac_pending++;
8890 for (i = 0; i < 10; i++) {
8891 if (!bp->set_mac_pending)
8892 break;
8893 msleep_interruptible(10);
8894 }
8895 if (bp->set_mac_pending)
8896 rc = -ENODEV;
8897 }
8899 return rc;
8900 }
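/* bnx2x_self_test() below backs ethtool's self-test; the buf[] slots
 * correspond to the test names reported via get_strings(). The offline
 * tests (registers, memory, loopback) reload the NIC in LOAD_DIAG mode
 * first; the nvram, interrupt and link tests run online. A typical
 * invocation, assuming eth0 is a bnx2x port:
 *
 *	ethtool -t eth0 offline
 */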
8902 static void bnx2x_self_test(struct net_device *dev,
8903 struct ethtool_test *etest, u64 *buf)
8904 {
8905 struct bnx2x *bp = netdev_priv(dev);
8907 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8909 if (!netif_running(dev))
8910 return;
8912 /* offline tests are not supported in MF mode */
8913 if (IS_E1HMF(bp))
8914 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8916 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8917 u8 link_up;
8919 link_up = bp->link_vars.link_up;
8920 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8921 bnx2x_nic_load(bp, LOAD_DIAG);
8922 /* wait until link state is restored */
8923 bnx2x_wait_for_link(bp, link_up);
8925 if (bnx2x_test_registers(bp) != 0) {
8926 buf[0] = 1;
8927 etest->flags |= ETH_TEST_FL_FAILED;
8928 }
8929 if (bnx2x_test_memory(bp) != 0) {
8930 buf[1] = 1;
8931 etest->flags |= ETH_TEST_FL_FAILED;
8932 }
8933 buf[2] = bnx2x_test_loopback(bp, link_up);
8934 if (buf[2] != 0)
8935 etest->flags |= ETH_TEST_FL_FAILED;
8937 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8938 bnx2x_nic_load(bp, LOAD_NORMAL);
8939 /* wait until link state is restored */
8940 bnx2x_wait_for_link(bp, link_up);
8941 }
8942 if (bnx2x_test_nvram(bp) != 0) {
8943 buf[3] = 1;
8944 etest->flags |= ETH_TEST_FL_FAILED;
8945 }
8946 if (bnx2x_test_intr(bp) != 0) {
8947 buf[4] = 1;
8948 etest->flags |= ETH_TEST_FL_FAILED;
8949 }
8950 if (bp->port.pmf)
8951 if (bnx2x_link_test(bp) != 0) {
8952 buf[5] = 1;
8953 etest->flags |= ETH_TEST_FL_FAILED;
8954 }
8955 buf[7] = bnx2x_mc_assert(bp);
8956 if (buf[7] != 0)
8957 etest->flags |= ETH_TEST_FL_FAILED;
8959 #ifdef BNX2X_EXTRA_DEBUG
8960 bnx2x_panic_dump(bp);
8961 #endif
8962 }
8964 static const struct {
8965 long offset;
8966 int size;
8967 u32 flags;
8968 #define STATS_FLAGS_PORT 1
8969 #define STATS_FLAGS_FUNC 2
8970 u8 string[ETH_GSTRING_LEN];
8971 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8972 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8973 8, STATS_FLAGS_FUNC, "rx_bytes" },
8974 { STATS_OFFSET32(error_bytes_received_hi),
8975 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8976 { STATS_OFFSET32(total_bytes_transmitted_hi),
8977 8, STATS_FLAGS_FUNC, "tx_bytes" },
8978 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8979 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8980 { STATS_OFFSET32(total_unicast_packets_received_hi),
8981 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8982 { STATS_OFFSET32(total_multicast_packets_received_hi),
8983 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8984 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8985 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8986 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8987 8, STATS_FLAGS_FUNC, "tx_packets" },
8988 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8989 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8990 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8991 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8992 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8993 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8994 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8995 8, STATS_FLAGS_PORT, "rx_align_errors" },
8996 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8997 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8998 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8999 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9000 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9001 8, STATS_FLAGS_PORT, "tx_deferred" },
9002 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9003 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9004 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9005 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9006 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9007 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9008 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9009 8, STATS_FLAGS_PORT, "rx_fragments" },
9010 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9011 8, STATS_FLAGS_PORT, "rx_jabbers" },
9012 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9013 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9014 { STATS_OFFSET32(jabber_packets_received),
9015 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9016 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9017 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9018 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9019 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9020 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9021 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9022 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9023 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9024 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9025 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9026 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9027 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9028 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9029 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9030 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9031 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9032 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9033 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9034 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9035 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9036 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9037 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9038 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9039 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9040 { STATS_OFFSET32(mac_filter_discard),
9041 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9042 { STATS_OFFSET32(no_buff_discard),
9043 4, STATS_FLAGS_FUNC, "rx_discards" },
9044 { STATS_OFFSET32(xxoverflow_discard),
9045 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9046 { STATS_OFFSET32(brb_drop_hi),
9047 8, STATS_FLAGS_PORT, "brb_discard" },
9048 { STATS_OFFSET32(brb_truncate_hi),
9049 8, STATS_FLAGS_PORT, "brb_truncate" },
9050 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9051 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9052 { STATS_OFFSET32(rx_skb_alloc_failed),
9053 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9054 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9055 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9056 };
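/* In E1H multi-function mode several PCI functions share one physical
 * port, so port-wide MAC statistics cannot be attributed to a single
 * function. The macro below hides STATS_FLAGS_PORT counters from
 * ethtool -S in that mode; STATS_FLAGS_FUNC counters are always shown.
 */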
9058 #define IS_NOT_E1HMF_STAT(bp, i) \
9059 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9061 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9062 {
9063 struct bnx2x *bp = netdev_priv(dev);
9064 int i, j;
9066 switch (stringset) {
9067 case ETH_SS_STATS:
9068 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9069 if (IS_NOT_E1HMF_STAT(bp, i))
9070 continue;
9071 strcpy(buf + j*ETH_GSTRING_LEN,
9072 bnx2x_stats_arr[i].string);
9073 j++;
9074 }
9075 break;
9077 case ETH_SS_TEST:
9078 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9079 break;
9080 }
9081 }
9083 static int bnx2x_get_stats_count(struct net_device *dev)
9084 {
9085 struct bnx2x *bp = netdev_priv(dev);
9086 int i, num_stats = 0;
9088 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9089 if (IS_NOT_E1HMF_STAT(bp, i))
9090 continue;
9091 num_stats++;
9092 }
9094 return num_stats;
9095 }
9096 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9097 struct ethtool_stats *stats, u64 *buf)
9098 {
9099 struct bnx2x *bp = netdev_priv(dev);
9100 u32 *hw_stats = (u32 *)&bp->eth_stats;
9101 int i, j;
9103 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9104 if (IS_NOT_E1HMF_STAT(bp, i))
9105 continue;
9107 if (bnx2x_stats_arr[i].size == 0) {
9108 /* skip this counter */
9109 buf[j] = 0;
9110 j++;
9111 continue;
9112 }
9113 if (bnx2x_stats_arr[i].size == 4) {
9114 /* 4-byte counter */
9115 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9116 j++;
9117 continue;
9118 }
9119 /* 8-byte counter */
9120 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9121 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9122 j++;
9123 }
9124 }
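/* bnx2x_phys_id() below implements ethtool's port-identify blink: the
 * LED alternates between LED_MODE_OPER (at 1G) and LED_MODE_OFF every
 * 500ms for the requested number of seconds, then is restored to match
 * the actual link state. For example, assuming eth0 is a bnx2x port,
 *
 *	ethtool -p eth0 5
 *
 * blinks the LED for roughly five seconds.
 */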
9126 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9127 {
9128 struct bnx2x *bp = netdev_priv(dev);
9129 int port = BP_PORT(bp);
9130 int i;
9132 if (!netif_running(dev))
9133 return 0;
9135 if (!bp->port.pmf)
9136 return 0;
9138 if (data == 0)
9139 data = 2;
9141 for (i = 0; i < (data * 2); i++) {
9142 if ((i % 2) == 0)
9143 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9144 bp->link_params.hw_led_mode,
9145 bp->link_params.chip_id);
9146 else
9147 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9148 bp->link_params.hw_led_mode,
9149 bp->link_params.chip_id);
9151 msleep_interruptible(500);
9152 if (signal_pending(current))
9153 break;
9154 }
9156 if (bp->link_vars.link_up)
9157 bnx2x_set_led(bp, port, LED_MODE_OPER,
9158 bp->link_vars.line_speed,
9159 bp->link_params.hw_led_mode,
9160 bp->link_params.chip_id);
9162 return 0;
9163 }
9165 static struct ethtool_ops bnx2x_ethtool_ops = {
9166 .get_settings = bnx2x_get_settings,
9167 .set_settings = bnx2x_set_settings,
9168 .get_drvinfo = bnx2x_get_drvinfo,
9169 .get_wol = bnx2x_get_wol,
9170 .set_wol = bnx2x_set_wol,
9171 .get_msglevel = bnx2x_get_msglevel,
9172 .set_msglevel = bnx2x_set_msglevel,
9173 .nway_reset = bnx2x_nway_reset,
9174 .get_link = ethtool_op_get_link,
9175 .get_eeprom_len = bnx2x_get_eeprom_len,
9176 .get_eeprom = bnx2x_get_eeprom,
9177 .set_eeprom = bnx2x_set_eeprom,
9178 .get_coalesce = bnx2x_get_coalesce,
9179 .set_coalesce = bnx2x_set_coalesce,
9180 .get_ringparam = bnx2x_get_ringparam,
9181 .set_ringparam = bnx2x_set_ringparam,
9182 .get_pauseparam = bnx2x_get_pauseparam,
9183 .set_pauseparam = bnx2x_set_pauseparam,
9184 .get_rx_csum = bnx2x_get_rx_csum,
9185 .set_rx_csum = bnx2x_set_rx_csum,
9186 .get_tx_csum = ethtool_op_get_tx_csum,
9187 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9188 .set_flags = bnx2x_set_flags,
9189 .get_flags = ethtool_op_get_flags,
9190 .get_sg = ethtool_op_get_sg,
9191 .set_sg = ethtool_op_set_sg,
9192 .get_tso = ethtool_op_get_tso,
9193 .set_tso = bnx2x_set_tso,
9194 .self_test_count = bnx2x_self_test_count,
9195 .self_test = bnx2x_self_test,
9196 .get_strings = bnx2x_get_strings,
9197 .phys_id = bnx2x_phys_id,
9198 .get_stats_count = bnx2x_get_stats_count,
9199 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9200 };
9202 /* end of ethtool_ops */
9204 /****************************************************************************
9205 * General service functions
9206 ****************************************************************************/
9208 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9209 {
9210 u16 pmcsr;
9212 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9214 switch (state) {
9215 case PCI_D0:
9216 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9217 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9218 PCI_PM_CTRL_PME_STATUS));
9220 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9221 /* delay required during transition out of D3hot */
9222 msleep(20);
9223 break;
9225 case PCI_D3hot:
9226 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9227 pmcsr |= 3;
9229 if (bp->wol)
9230 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9232 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9233 pmcsr);
9235 /* No more memory access after this point until
9236 * device is brought back to D0.
9237 */
9238 break;
9240 default:
9241 return -EINVAL;
9242 }
9244 return 0;
9245 }
9246 /****************************************************************************
9247 * net_device service functions
9248 ****************************************************************************/
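/* NAPI poll flow: refresh the fastpath status-block indices, reap tx
 * completions, then do rx work up to the budget. The
 * (rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT adjustment skips
 * the "next page" entry occupying the last slot of each RCQ page.
 * Polling completes (re-enabling the IGU interrupt via bnx2x_ack_sb())
 * only when the budget was not exhausted and no new work is pending.
 */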
9250 static int bnx2x_poll(struct napi_struct *napi, int budget)
9251 {
9252 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9253 napi);
9254 struct bnx2x *bp = fp->bp;
9255 int work_done = 0;
9256 u16 rx_cons_sb;
9258 #ifdef BNX2X_STOP_ON_ERROR
9259 if (unlikely(bp->panic))
9260 goto poll_panic;
9261 #endif
9263 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9264 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9265 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9267 bnx2x_update_fpsb_idx(fp);
9269 if (BNX2X_HAS_TX_WORK(fp))
9270 bnx2x_tx_int(fp, budget);
9272 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9273 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9274 rx_cons_sb++;
9275 if (BNX2X_HAS_RX_WORK(fp))
9276 work_done = bnx2x_rx_int(fp, budget);
9278 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9279 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9280 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9281 rx_cons_sb++;
9283 /* must not complete if we consumed full budget */
9284 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9286 #ifdef BNX2X_STOP_ON_ERROR
9287 poll_panic:
9288 #endif
9289 netif_rx_complete(bp->dev, napi);
9291 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9292 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9293 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9294 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9295 }
9297 return work_done;
9298 }
9300 /* we split the first BD into header and data BDs
9301 * to ease the pain of our fellow microcode engineers;
9302 * we use one mapping for both BDs.
9303 * So far this has only been observed to happen
9304 * in Other Operating Systems(TM)
9305 */
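/* Worked example (hypothetical sizes): a TSO skb with 66 bytes of
 * headers and 200 bytes in its linear part arrives here with one BD of
 * nbytes = 200. bnx2x_tx_split() trims that BD to nbytes = 66 and adds
 * a data BD at mapping + 66 with nbytes = 134, reusing the same DMA
 * mapping, so a single unmap at completion time still suffices.
 */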
9306 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9307 struct bnx2x_fastpath *fp,
9308 struct eth_tx_bd **tx_bd, u16 hlen,
9309 u16 bd_prod, int nbd)
9310 {
9311 struct eth_tx_bd *h_tx_bd = *tx_bd;
9312 struct eth_tx_bd *d_tx_bd;
9313 dma_addr_t mapping;
9314 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9316 /* first fix first BD */
9317 h_tx_bd->nbd = cpu_to_le16(nbd);
9318 h_tx_bd->nbytes = cpu_to_le16(hlen);
9320 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9321 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9322 h_tx_bd->addr_lo, h_tx_bd->nbd);
9324 /* now get a new data BD
9325 * (after the pbd) and fill it */
9326 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9327 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9329 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9330 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9332 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9333 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9334 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9336 /* this marks the BD as one that has no individual mapping
9337 * the FW ignores this flag in a BD not marked start
9339 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9340 DP(NETIF_MSG_TX_QUEUED,
9341 "TSO split data size is %d (%x:%x)\n",
9342 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9344 /* update tx_bd for marking the last BD flag */
9345 *tx_bd = d_tx_bd;
9347 return bd_prod;
9348 }
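/* bnx2x_csum_fix() below compensates for a HW limitation: the chip
 * checksums from the start of the packet, while the stack's partial
 * checksum covers the bytes from csum_start onward. "fix" (SKB_CS_OFF())
 * is the signed distance between the two points, so the extra bytes are
 * folded out (fix > 0) or back in (fix < 0) via csum_sub()/csum_add()
 * before the result is byte-swapped for the parsing BD.
 */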
9350 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9351 {
9352 if (fix > 0)
9353 csum = (u16) ~csum_fold(csum_sub(csum,
9354 csum_partial(t_header - fix, fix, 0)));
9356 else if (fix < 0)
9357 csum = (u16) ~csum_fold(csum_add(csum,
9358 csum_partial(t_header, -fix, 0)));
9360 return swab16(csum);
9361 }
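/* bnx2x_xmit_type() below condenses an skb into a small bitmask:
 * XMIT_CSUM_V4/XMIT_CSUM_V6 (plus XMIT_CSUM_TCP) for CHECKSUM_PARTIAL
 * frames and XMIT_GSO_V4/XMIT_GSO_V6 for TSO. The transmit path then
 * keys the BD flags and parsing-BD setup off these bits instead of
 * re-parsing the headers at every decision point.
 */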
9363 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9364 {
9365 u32 rc = XMIT_PLAIN;
9367 if (skb->ip_summed != CHECKSUM_PARTIAL)
9368 rc = XMIT_PLAIN;
9370 else {
9371 if (skb->protocol == htons(ETH_P_IPV6)) {
9372 rc = XMIT_CSUM_V6;
9373 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9374 rc |= XMIT_CSUM_TCP;
9376 } else {
9377 rc = XMIT_CSUM_V4;
9378 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9379 rc |= XMIT_CSUM_TCP;
9380 }
9381 }
9383 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9384 rc |= XMIT_GSO_V4;
9386 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9387 rc |= XMIT_GSO_V6;
9389 return rc;
9390 }
9392 /* check if packet requires linearization (packet is too fragmented) */
9393 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9394 u32 xmit_type)
9395 {
9396 int to_copy = 0;
9397 int hlen = 0;
9398 int first_bd_sz = 0;
9400 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9401 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9403 if (xmit_type & XMIT_GSO) {
9404 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9405 /* Check if LSO packet needs to be copied:
9406 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9407 int wnd_size = MAX_FETCH_BD - 3;
9408 /* Number of windows to check */
9409 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9410 int wnd_idx = 0;
9411 int frag_idx = 0;
9412 u32 wnd_sum = 0;
9414 /* Headers length */
9415 hlen = (int)(skb_transport_header(skb) - skb->data) +
9416 tcp_hdrlen(skb);
9418 /* Amount of data (w/o headers) on linear part of SKB*/
9419 first_bd_sz = skb_headlen(skb) - hlen;
9421 wnd_sum = first_bd_sz;
9423 /* Calculate the first sum - it's special */
9424 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9425 wnd_sum +=
9426 skb_shinfo(skb)->frags[frag_idx].size;
9428 /* If there was data on linear skb data - check it */
9429 if (first_bd_sz > 0) {
9430 if (unlikely(wnd_sum < lso_mss)) {
9431 to_copy = 1;
9432 goto exit_lbl;
9433 }
9435 wnd_sum -= first_bd_sz;
9436 }
9438 /* Others are easier: run through the frag list and
9439 check all windows */
9440 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9441 wnd_sum +=
9442 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9444 if (unlikely(wnd_sum < lso_mss)) {
9445 to_copy = 1;
9446 break;
9447 }
9448 wnd_sum -=
9449 skb_shinfo(skb)->frags[wnd_idx].size;
9450 }
9452 } else {
9453 /* in non-LSO too fragmented packet should always
9454 be linearized */
9455 to_copy = 1;
9456 }
9457 }
9459 exit_lbl:
9460 if (unlikely(to_copy))
9461 DP(NETIF_MSG_TX_QUEUED,
9462 "Linearization IS REQUIRED for %s packet. "
9463 "num_frags %d hlen %d first_bd_sz %d\n",
9464 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9465 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9467 return to_copy;
9468 }
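/* Sketch of the window check above: the FW fetches at most MAX_FETCH_BD
 * BDs per packet, three of which are reserved for the start BD, the
 * parsing BD and the last BD. An LSO packet is safe only if every
 * wnd_size consecutive frags (plus the linear remainder for the first
 * window) sum to at least one MSS; otherwise a single MSS-sized segment
 * would span more than wnd_size BDs, and the skb must be linearized.
 */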
9470 /* called with netif_tx_lock
9471 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9472 * netif_wake_queue()
9473 */
9474 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9475 {
9476 struct bnx2x *bp = netdev_priv(dev);
9477 struct bnx2x_fastpath *fp;
9478 struct sw_tx_bd *tx_buf;
9479 struct eth_tx_bd *tx_bd;
9480 struct eth_tx_parse_bd *pbd = NULL;
9481 u16 pkt_prod, bd_prod;
9482 int nbd, fp_index;
9483 dma_addr_t mapping;
9484 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9485 int vlan_off = (bp->e1hov ? 4 : 0);
9486 int i;
9487 u8 hlen = 0;
9489 #ifdef BNX2X_STOP_ON_ERROR
9490 if (unlikely(bp->panic))
9491 return NETDEV_TX_BUSY;
9492 #endif
9494 fp_index = (smp_processor_id() % bp->num_queues);
9495 fp = &bp->fp[fp_index];
9497 if (unlikely(bnx2x_tx_avail(fp) <
9498 (skb_shinfo(skb)->nr_frags + 3))) {
9499 bp->eth_stats.driver_xoff++;
9500 netif_stop_queue(dev);
9501 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9502 return NETDEV_TX_BUSY;
9503 }
9505 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9506 " gso type %x xmit_type %x\n",
9507 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9508 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9510 /* First, check if we need to linearize the skb
9511 (due to FW restrictions) */
9512 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9513 /* Statistics of linearization */
9514 bp->lin_cnt++;
9515 if (skb_linearize(skb) != 0) {
9516 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9517 "silently dropping this SKB\n");
9518 dev_kfree_skb_any(skb);
9519 return NETDEV_TX_OK;
9520 }
9521 }
9523 /*
9524 Please read carefully. First we use one BD which we mark as start,
9525 then for TSO or xsum we have a parsing info BD,
9526 and only then we have the rest of the TSO BDs.
9527 (don't forget to mark the last one as last,
9528 and to unmap only AFTER you write to the BD ...)
9529 And above all, all pbd sizes are in words - NOT DWORDS!
9530 */
9532 pkt_prod = fp->tx_pkt_prod++;
9533 bd_prod = TX_BD(fp->tx_bd_prod);
9535 /* get a tx_buf and first BD */
9536 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9537 tx_bd = &fp->tx_desc_ring[bd_prod];
9539 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9540 tx_bd->general_data = (UNICAST_ADDRESS <<
9541 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9543 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9545 /* remember the first BD of the packet */
9546 tx_buf->first_bd = fp->tx_bd_prod;
9547 tx_buf->skb = skb;
9549 DP(NETIF_MSG_TX_QUEUED,
9550 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9551 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9553 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9554 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9555 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9556 vlan_off += 4;
9557 } else
9558 tx_bd->vlan = cpu_to_le16(pkt_prod);
9561 if (xmit_type) {
9562 /* turn on parsing and get a BD */
9563 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9564 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9566 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9567 }
9569 if (xmit_type & XMIT_CSUM) {
9570 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9572 /* for now NS flag is not used in Linux */
9573 pbd->global_data = (hlen |
9574 ((skb->protocol == htons(ETH_P_8021Q)) <<
9575 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9577 pbd->ip_hlen = (skb_transport_header(skb) -
9578 skb_network_header(skb)) / 2;
9580 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9582 pbd->total_hlen = cpu_to_le16(hlen);
9583 hlen = hlen*2 - vlan_off;
9585 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9587 if (xmit_type & XMIT_CSUM_V4)
9588 tx_bd->bd_flags.as_bitfield |=
9589 ETH_TX_BD_FLAGS_IP_CSUM;
9590 else
9591 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9593 if (xmit_type & XMIT_CSUM_TCP) {
9594 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9596 } else {
9597 s8 fix = SKB_CS_OFF(skb); /* signed! */
9599 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9600 pbd->cs_offset = fix / 2;
9602 DP(NETIF_MSG_TX_QUEUED,
9603 "hlen %d offset %d fix %d csum before fix %x\n",
9604 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9605 SKB_CS(skb));
9607 /* HW bug: fixup the CSUM */
9608 pbd->tcp_pseudo_csum =
9609 bnx2x_csum_fix(skb_transport_header(skb),
9610 SKB_CS(skb), fix);
9612 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9613 pbd->tcp_pseudo_csum);
9614 }
9615 }
9617 mapping = pci_map_single(bp->pdev, skb->data,
9618 skb_headlen(skb), PCI_DMA_TODEVICE);
9620 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9621 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9622 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9623 tx_bd->nbd = cpu_to_le16(nbd);
9624 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9626 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9627 " nbytes %d flags %x vlan %x\n",
9628 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9629 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9630 le16_to_cpu(tx_bd->vlan));
9632 if (xmit_type & XMIT_GSO) {
9634 DP(NETIF_MSG_TX_QUEUED,
9635 "TSO packet len %d hlen %d total len %d tso size %d\n",
9636 skb->len, hlen, skb_headlen(skb),
9637 skb_shinfo(skb)->gso_size);
9639 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9641 if (unlikely(skb_headlen(skb) > hlen))
9642 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9643 bd_prod, ++nbd);
9645 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9646 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9647 pbd->tcp_flags = pbd_tcp_flags(skb);
9649 if (xmit_type & XMIT_GSO_V4) {
9650 pbd->ip_id = swab16(ip_hdr(skb)->id);
9651 pbd->tcp_pseudo_csum =
9652 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9653 ip_hdr(skb)->daddr,
9654 0, IPPROTO_TCP, 0));
9656 } else
9657 pbd->tcp_pseudo_csum =
9658 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9659 &ipv6_hdr(skb)->daddr,
9660 0, IPPROTO_TCP, 0));
9662 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9663 }
9665 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9666 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9668 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9669 tx_bd = &fp->tx_desc_ring[bd_prod];
9671 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9672 frag->size, PCI_DMA_TODEVICE);
9674 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9675 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9676 tx_bd->nbytes = cpu_to_le16(frag->size);
9677 tx_bd->vlan = cpu_to_le16(pkt_prod);
9678 tx_bd->bd_flags.as_bitfield = 0;
9680 DP(NETIF_MSG_TX_QUEUED,
9681 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9682 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9683 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9684 }
9686 /* now at last mark the BD as the last BD */
9687 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9689 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9690 tx_bd, tx_bd->bd_flags.as_bitfield);
9692 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9694 /* now send a tx doorbell, counting the next BD
9695 * if the packet contains or ends with it
9696 */
9697 if (TX_BD_POFF(bd_prod) < nbd)
9698 nbd++;
9700 if (pbd)
9701 DP(NETIF_MSG_TX_QUEUED,
9702 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9703 " tcp_flags %x xsum %x seq %u hlen %u\n",
9704 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9705 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9706 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9708 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9710 fp->hw_tx_prods->bds_prod =
9711 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9712 mb(); /* FW restriction: must not reorder writing nbd and packets */
9713 fp->hw_tx_prods->packets_prod =
9714 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9715 DOORBELL(bp, FP_IDX(fp), 0);
9717 mmiowb();
9719 fp->tx_bd_prod += nbd;
9720 dev->trans_start = jiffies;
9722 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9723 netif_stop_queue(dev);
9724 bp->eth_stats.driver_xoff++;
9725 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9726 netif_wake_queue(dev);
9727 }
9728 fp->tx_pkt++;
9730 return NETDEV_TX_OK;
9731 }
9733 /* called with rtnl_lock */
9734 static int bnx2x_open(struct net_device *dev)
9735 {
9736 struct bnx2x *bp = netdev_priv(dev);
9738 bnx2x_set_power_state(bp, PCI_D0);
9740 return bnx2x_nic_load(bp, LOAD_OPEN);
9741 }
9743 /* called with rtnl_lock */
9744 static int bnx2x_close(struct net_device *dev)
9745 {
9746 struct bnx2x *bp = netdev_priv(dev);
9748 /* Unload the driver, release IRQs */
9749 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9750 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9751 if (!CHIP_REV_IS_SLOW(bp))
9752 bnx2x_set_power_state(bp, PCI_D3hot);
9754 return 0;
9755 }
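/* Rx filtering strategy (bnx2x_set_rx_mode() below): promiscuous and
 * all-multi map directly to storm rx-mode flags. For an explicit
 * multicast list, E1 programs up to BNX2X_MAX_MULTICAST perfect-match
 * CAM entries via a SET_MAC ramrod and invalidates stale ones, while
 * E1H hashes each address with crc32c and sets one bit in the 256-bit
 * MC_HASH approximate filter: e.g. a CRC whose top byte is 0x5a sets
 * bit 26 (0x5a & 0x1f) of MC_HASH register 2 (0x5a >> 5).
 */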
9757 /* called with netif_tx_lock from set_multicast */
9758 static void bnx2x_set_rx_mode(struct net_device *dev)
9759 {
9760 struct bnx2x *bp = netdev_priv(dev);
9761 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9762 int port = BP_PORT(bp);
9764 if (bp->state != BNX2X_STATE_OPEN) {
9765 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9766 return;
9767 }
9769 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9771 if (dev->flags & IFF_PROMISC)
9772 rx_mode = BNX2X_RX_MODE_PROMISC;
9774 else if ((dev->flags & IFF_ALLMULTI) ||
9775 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9776 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9778 else { /* some multicasts */
9779 if (CHIP_IS_E1(bp)) {
9780 int i, old, offset;
9781 struct dev_mc_list *mclist;
9782 struct mac_configuration_cmd *config =
9783 bnx2x_sp(bp, mcast_config);
9785 for (i = 0, mclist = dev->mc_list;
9786 mclist && (i < dev->mc_count);
9787 i++, mclist = mclist->next) {
9789 config->config_table[i].
9790 cam_entry.msb_mac_addr =
9791 swab16(*(u16 *)&mclist->dmi_addr[0]);
9792 config->config_table[i].
9793 cam_entry.middle_mac_addr =
9794 swab16(*(u16 *)&mclist->dmi_addr[2]);
9795 config->config_table[i].
9796 cam_entry.lsb_mac_addr =
9797 swab16(*(u16 *)&mclist->dmi_addr[4]);
9798 config->config_table[i].cam_entry.flags =
9799 cpu_to_le16(port);
9800 config->config_table[i].
9801 target_table_entry.flags = 0;
9802 config->config_table[i].
9803 target_table_entry.client_id = 0;
9804 config->config_table[i].
9805 target_table_entry.vlan_id = 0;
9807 DP(NETIF_MSG_IFUP,
9808 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9809 config->config_table[i].
9810 cam_entry.msb_mac_addr,
9811 config->config_table[i].
9812 cam_entry.middle_mac_addr,
9813 config->config_table[i].
9814 cam_entry.lsb_mac_addr);
9815 }
9816 old = config->hdr.length_6b;
9817 if (old > i) {
9818 for (; i < old; i++) {
9819 if (CAM_IS_INVALID(config->
9820 config_table[i])) {
9821 i--; /* already invalidated */
9822 break;
9823 }
9824 /* invalidate */
9825 CAM_INVALIDATE(config->
9826 config_table[i]);
9827 }
9828 }
9830 if (CHIP_REV_IS_SLOW(bp))
9831 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9832 else
9833 offset = BNX2X_MAX_MULTICAST*(1 + port);
9835 config->hdr.length_6b = i;
9836 config->hdr.offset = offset;
9837 config->hdr.client_id = BP_CL_ID(bp);
9838 config->hdr.reserved1 = 0;
9840 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9841 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9842 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9843 0);
9844 } else { /* E1H */
9845 /* Accept one or more multicasts */
9846 struct dev_mc_list *mclist;
9847 u32 mc_filter[MC_HASH_SIZE];
9848 u32 crc, bit, regidx;
9849 int i;
9851 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9853 for (i = 0, mclist = dev->mc_list;
9854 mclist && (i < dev->mc_count);
9855 i++, mclist = mclist->next) {
9857 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9858 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9859 mclist->dmi_addr[0], mclist->dmi_addr[1],
9860 mclist->dmi_addr[2], mclist->dmi_addr[3],
9861 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9863 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9864 bit = (crc >> 24) & 0xff;
9865 regidx = bit >> 5;
9866 bit &= 0x1f;
9867 mc_filter[regidx] |= (1 << bit);
9868 }
9870 for (i = 0; i < MC_HASH_SIZE; i++)
9871 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9872 mc_filter[i]);
9873 }
9874 }
9876 bp->rx_mode = rx_mode;
9877 bnx2x_set_storm_rx_mode(bp);
9878 }
9880 /* called with rtnl_lock */
9881 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9882 {
9883 struct sockaddr *addr = p;
9884 struct bnx2x *bp = netdev_priv(dev);
9886 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9887 return -EINVAL;
9889 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9890 if (netif_running(dev)) {
9891 if (CHIP_IS_E1(bp))
9892 bnx2x_set_mac_addr_e1(bp, 1);
9893 else
9894 bnx2x_set_mac_addr_e1h(bp, 1);
9895 }
9897 return 0;
9898 }
9900 /* called with rtnl_lock */
9901 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9902 {
9903 struct mii_ioctl_data *data = if_mii(ifr);
9904 struct bnx2x *bp = netdev_priv(dev);
9905 int port = BP_PORT(bp);
9906 int err;
9908 switch (cmd) {
9909 case SIOCGMIIPHY:
9910 data->phy_id = bp->port.phy_addr;
9912 /* fallthrough */
9914 case SIOCGMIIREG: {
9915 u16 mii_regval;
9917 if (!netif_running(dev))
9918 return -EAGAIN;
9920 mutex_lock(&bp->port.phy_mutex);
9921 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9922 DEFAULT_PHY_DEV_ADDR,
9923 (data->reg_num & 0x1f), &mii_regval);
9924 data->val_out = mii_regval;
9925 mutex_unlock(&bp->port.phy_mutex);
9926 return err;
9927 }
9929 case SIOCSMIIREG:
9930 if (!capable(CAP_NET_ADMIN))
9931 return -EPERM;
9933 if (!netif_running(dev))
9934 return -EAGAIN;
9936 mutex_lock(&bp->port.phy_mutex);
9937 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9938 DEFAULT_PHY_DEV_ADDR,
9939 (data->reg_num & 0x1f), data->val_in);
9940 mutex_unlock(&bp->port.phy_mutex);
9941 return err;
9943 default:
9944 /* do nothing */
9945 break;
9946 }
9948 return -EOPNOTSUPP;
9949 }
9951 /* called with rtnl_lock */
9952 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9953 {
9954 struct bnx2x *bp = netdev_priv(dev);
9955 int rc = 0;
9957 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9958 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9959 return -EINVAL;
9961 /* This does not race with packet allocation
9962 * because the actual alloc size is
9963 * only updated as part of load
9964 */
9965 dev->mtu = new_mtu;
9967 if (netif_running(dev)) {
9968 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9969 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9970 }
9972 return rc;
9973 }
9975 static void bnx2x_tx_timeout(struct net_device *dev)
9976 {
9977 struct bnx2x *bp = netdev_priv(dev);
9979 #ifdef BNX2X_STOP_ON_ERROR
9980 if (!bp->panic)
9981 bnx2x_panic();
9982 #endif
9983 /* This allows the netif to be shutdown gracefully before resetting */
9984 schedule_work(&bp->reset_task);
9985 }
9988 /* called with rtnl_lock */
9989 static void bnx2x_vlan_rx_register(struct net_device *dev,
9990 struct vlan_group *vlgrp)
9991 {
9992 struct bnx2x *bp = netdev_priv(dev);
9994 bp->vlgrp = vlgrp;
9995 if (netif_running(dev))
9996 bnx2x_set_client_config(bp);
9997 }
10001 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10002 static void poll_bnx2x(struct net_device *dev)
10003 {
10004 struct bnx2x *bp = netdev_priv(dev);
10006 disable_irq(bp->pdev->irq);
10007 bnx2x_interrupt(bp->pdev->irq, dev);
10008 enable_irq(bp->pdev->irq);
10009 }
10010 #endif
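/* BAR layout assumed by bnx2x_init_dev() below: BAR0 is the register
 * window (mapped at bp->regview and accessed through REG_RD/REG_WR),
 * BAR2 is the doorbell space (bp->doorbells, capped at BNX2X_DB_SIZE).
 * Both are mapped uncached, and losing either mapping aborts the probe.
 */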
10012 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10013 struct net_device *dev)
10014 {
10015 struct bnx2x *bp;
10016 int rc;
10018 SET_NETDEV_DEV(dev, &pdev->dev);
10019 bp = netdev_priv(dev);
10021 bp->dev = dev;
10022 bp->pdev = pdev;
10023 bp->flags = 0;
10024 bp->func = PCI_FUNC(pdev->devfn);
10026 rc = pci_enable_device(pdev);
10027 if (rc) {
10028 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10029 goto err_out;
10030 }
10032 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10033 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10034 " aborting\n");
10035 rc = -ENODEV;
10036 goto err_out_disable;
10037 }
10039 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10040 printk(KERN_ERR PFX "Cannot find second PCI device"
10041 " base address, aborting\n");
10042 rc = -ENODEV;
10043 goto err_out_disable;
10044 }
10046 if (atomic_read(&pdev->enable_cnt) == 1) {
10047 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10048 if (rc) {
10049 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10050 " aborting\n");
10051 goto err_out_disable;
10052 }
10054 pci_set_master(pdev);
10055 pci_save_state(pdev);
10056 }
10058 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10059 if (bp->pm_cap == 0) {
10060 printk(KERN_ERR PFX "Cannot find power management"
10061 " capability, aborting\n");
10062 rc = -EIO;
10063 goto err_out_release;
10064 }
10066 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10067 if (bp->pcie_cap == 0) {
10068 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10069 " aborting\n");
10070 rc = -EIO;
10071 goto err_out_release;
10072 }
10074 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10075 bp->flags |= USING_DAC_FLAG;
10076 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10077 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10078 " failed, aborting\n");
10079 rc = -EIO;
10080 goto err_out_release;
10081 }
10083 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10084 printk(KERN_ERR PFX "System does not support DMA,"
10085 " aborting\n");
10086 rc = -EIO;
10087 goto err_out_release;
10088 }
10090 dev->mem_start = pci_resource_start(pdev, 0);
10091 dev->base_addr = dev->mem_start;
10092 dev->mem_end = pci_resource_end(pdev, 0);
10094 dev->irq = pdev->irq;
10096 bp->regview = ioremap_nocache(dev->base_addr,
10097 pci_resource_len(pdev, 0));
10098 if (!bp->regview) {
10099 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10100 rc = -ENOMEM;
10101 goto err_out_release;
10102 }
10104 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10105 min_t(u64, BNX2X_DB_SIZE,
10106 pci_resource_len(pdev, 2)));
10107 if (!bp->doorbells) {
10108 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10109 rc = -ENOMEM;
10110 goto err_out_unmap;
10111 }
10113 bnx2x_set_power_state(bp, PCI_D0);
10115 /* clean indirect addresses */
10116 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10117 PCICFG_VENDOR_ID_OFFSET);
10118 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10119 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10120 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10121 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10123 dev->hard_start_xmit = bnx2x_start_xmit;
10124 dev->watchdog_timeo = TX_TIMEOUT;
10126 dev->ethtool_ops = &bnx2x_ethtool_ops;
10127 dev->open = bnx2x_open;
10128 dev->stop = bnx2x_close;
10129 dev->set_multicast_list = bnx2x_set_rx_mode;
10130 dev->set_mac_address = bnx2x_change_mac_addr;
10131 dev->do_ioctl = bnx2x_ioctl;
10132 dev->change_mtu = bnx2x_change_mtu;
10133 dev->tx_timeout = bnx2x_tx_timeout;
10134 #ifdef BCM_VLAN
10135 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10136 #endif
10137 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10138 dev->poll_controller = poll_bnx2x;
10139 #endif
10140 dev->features |= NETIF_F_SG;
10141 dev->features |= NETIF_F_HW_CSUM;
10142 if (bp->flags & USING_DAC_FLAG)
10143 dev->features |= NETIF_F_HIGHDMA;
10144 #ifdef BCM_VLAN
10145 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10146 #endif
10147 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10148 dev->features |= NETIF_F_TSO6;
10150 return 0;
10152 err_out_unmap:
10153 if (bp->regview) {
10154 iounmap(bp->regview);
10155 bp->regview = NULL;
10156 }
10157 if (bp->doorbells) {
10158 iounmap(bp->doorbells);
10159 bp->doorbells = NULL;
10160 }
10162 err_out_release:
10163 if (atomic_read(&pdev->enable_cnt) == 1)
10164 pci_release_regions(pdev);
10166 err_out_disable:
10167 pci_disable_device(pdev);
10168 pci_set_drvdata(pdev, NULL);
10170 err_out:
10171 return rc;
10172 }
10174 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10175 {
10176 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10178 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10179 return val;
10180 }
10182 /* return value of 1=2.5GHz 2=5GHz */
10183 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10184 {
10185 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10187 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10188 return val;
10189 }
10191 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10192 const struct pci_device_id *ent)
10193 {
10194 static int version_printed;
10195 struct net_device *dev = NULL;
10196 struct bnx2x *bp;
10197 int rc;
10198 DECLARE_MAC_BUF(mac);
10200 if (version_printed++ == 0)
10201 printk(KERN_INFO "%s", version);
10203 /* dev zeroed in init_etherdev */
10204 dev = alloc_etherdev(sizeof(*bp));
10205 if (!dev) {
10206 printk(KERN_ERR PFX "Cannot allocate net device\n");
10207 return -ENOMEM;
10208 }
10210 netif_carrier_off(dev);
10212 bp = netdev_priv(dev);
10213 bp->msglevel = debug;
10215 rc = bnx2x_init_dev(pdev, dev);
10216 if (rc < 0) {
10217 free_netdev(dev);
10218 return rc;
10219 }
10221 rc = register_netdev(dev);
10222 if (rc) {
10223 dev_err(&pdev->dev, "Cannot register net device\n");
10224 goto init_one_exit;
10225 }
10227 pci_set_drvdata(pdev, dev);
10229 rc = bnx2x_init_bp(bp);
10231 unregister_netdev(dev);
10232 goto init_one_exit;
10233 }
10235 bp->common.name = board_info[ent->driver_data].name;
10236 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10237 " IRQ %d, ", dev->name, bp->common.name,
10238 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10239 bnx2x_get_pcie_width(bp),
10240 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10241 dev->base_addr, bp->pdev->irq);
10242 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10244 return 0;
10245 init_one_exit:
10246 if (bp->regview)
10247 iounmap(bp->regview);
10249 if (bp->doorbells)
10250 iounmap(bp->doorbells);
10252 free_netdev(dev);
10254 if (atomic_read(&pdev->enable_cnt) == 1)
10255 pci_release_regions(pdev);
10257 pci_disable_device(pdev);
10258 pci_set_drvdata(pdev, NULL);
10260 return rc;
10261 }
10263 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10264 {
10265 struct net_device *dev = pci_get_drvdata(pdev);
10266 struct bnx2x *bp;
10268 if (!dev) {
10269 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10270 return;
10271 }
10272 bp = netdev_priv(dev);
10274 unregister_netdev(dev);
10276 if (bp->regview)
10277 iounmap(bp->regview);
10279 if (bp->doorbells)
10280 iounmap(bp->doorbells);
10282 free_netdev(dev);
10284 if (atomic_read(&pdev->enable_cnt) == 1)
10285 pci_release_regions(pdev);
10287 pci_disable_device(pdev);
10288 pci_set_drvdata(pdev, NULL);
10289 }
10291 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10292 {
10293 struct net_device *dev = pci_get_drvdata(pdev);
10294 struct bnx2x *bp;
10296 if (!dev) {
10297 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10298 return -ENODEV;
10299 }
10300 bp = netdev_priv(dev);
10302 rtnl_lock();
10304 pci_save_state(pdev);
10306 if (!netif_running(dev)) {
10307 rtnl_unlock();
10308 return 0;
10309 }
10311 netif_device_detach(dev);
10313 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10315 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10317 rtnl_unlock();
10319 return 0;
10320 }
10322 static int bnx2x_resume(struct pci_dev *pdev)
10323 {
10324 struct net_device *dev = pci_get_drvdata(pdev);
10325 struct bnx2x *bp;
10326 int rc;
10328 if (!dev) {
10329 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10330 return -ENODEV;
10331 }
10332 bp = netdev_priv(dev);
10334 rtnl_lock();
10336 pci_restore_state(pdev);
10338 if (!netif_running(dev)) {
10339 rtnl_unlock();
10340 return 0;
10341 }
10343 bnx2x_set_power_state(bp, PCI_D0);
10344 netif_device_attach(dev);
10346 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10348 rtnl_unlock();
10350 return rc;
10351 }
10353 /**
10354 * bnx2x_io_error_detected - called when PCI error is detected
10355 * @pdev: Pointer to PCI device
10356 * @state: The current pci connection state
10357 *
10358 * This function is called after a PCI bus error affecting
10359 * this device has been detected.
10360 */
10361 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10362 pci_channel_state_t state)
10363 {
10364 struct net_device *dev = pci_get_drvdata(pdev);
10365 struct bnx2x *bp = netdev_priv(dev);
10367 rtnl_lock();
10369 netif_device_detach(dev);
10371 if (netif_running(dev))
10372 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10374 pci_disable_device(pdev);
10376 rtnl_unlock();
10378 /* Request a slot reset */
10379 return PCI_ERS_RESULT_NEED_RESET;
10380 }
10382 /**
10383 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10384 * @pdev: Pointer to PCI device
10385 *
10386 * Restart the card from scratch, as if from a cold-boot.
10387 */
10388 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10389 {
10390 struct net_device *dev = pci_get_drvdata(pdev);
10391 struct bnx2x *bp = netdev_priv(dev);
10393 rtnl_lock();
10395 if (pci_enable_device(pdev)) {
10396 dev_err(&pdev->dev,
10397 "Cannot re-enable PCI device after reset\n");
10398 rtnl_unlock();
10399 return PCI_ERS_RESULT_DISCONNECT;
10400 }
10402 pci_set_master(pdev);
10403 pci_restore_state(pdev);
10405 if (netif_running(dev))
10406 bnx2x_set_power_state(bp, PCI_D0);
10408 rtnl_unlock();
10410 return PCI_ERS_RESULT_RECOVERED;
10411 }
10413 /**
10414 * bnx2x_io_resume - called when traffic can start flowing again
10415 * @pdev: Pointer to PCI device
10416 *
10417 * This callback is called when the error recovery driver tells us that
10418 * it's OK to resume normal operation.
10419 */
10420 static void bnx2x_io_resume(struct pci_dev *pdev)
10421 {
10422 struct net_device *dev = pci_get_drvdata(pdev);
10423 struct bnx2x *bp = netdev_priv(dev);
10425 rtnl_lock();
10427 if (netif_running(dev))
10428 bnx2x_nic_load(bp, LOAD_OPEN);
10430 netif_device_attach(dev);
10432 rtnl_unlock();
10433 }
10435 static struct pci_error_handlers bnx2x_err_handler = {
10436 .error_detected = bnx2x_io_error_detected,
10437 .slot_reset = bnx2x_io_slot_reset,
10438 .resume = bnx2x_io_resume,
10439 };
10441 static struct pci_driver bnx2x_pci_driver = {
10442 .name = DRV_MODULE_NAME,
10443 .id_table = bnx2x_pci_tbl,
10444 .probe = bnx2x_init_one,
10445 .remove = __devexit_p(bnx2x_remove_one),
10446 .suspend = bnx2x_suspend,
10447 .resume = bnx2x_resume,
10448 .err_handler = &bnx2x_err_handler,
10449 };
10451 static int __init bnx2x_init(void)
10452 {
10453 return pci_register_driver(&bnx2x_pci_driver);
10454 }
10456 static void __exit bnx2x_cleanup(void)
10457 {
10458 pci_unregister_driver(&bnx2x_pci_driver);
10459 }
10461 module_init(bnx2x_init);
10462 module_exit(bnx2x_cleanup);