1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
46 #include <net/checksum.h>
47 #include <linux/version.h>
48 #include <net/ip6_checksum.h>
49 #include <linux/workqueue.h>
50 #include <linux/crc32.h>
51 #include <linux/crc32c.h>
52 #include <linux/prefetch.h>
53 #include <linux/zlib.h>
56 #include "bnx2x_reg.h"
57 #include "bnx2x_fw_defs.h"
58 #include "bnx2x_hsi.h"
59 #include "bnx2x_link.h"
61 #include "bnx2x_init.h"
63 #define DRV_MODULE_VERSION "1.45.6"
64 #define DRV_MODULE_RELDATE "2008/06/23"
65 #define BNX2X_BC_VER 0x040200
67 /* Time in jiffies before concluding the transmitter is hung */
68 #define TX_TIMEOUT (5*HZ)
70 static char version[] __devinitdata =
71 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
72 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
74 MODULE_AUTHOR("Eliezer Tamir");
75 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76 MODULE_LICENSE("GPL");
77 MODULE_VERSION(DRV_MODULE_VERSION);
79 static int disable_tpa;
83 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
86 module_param(disable_tpa, int, 0);
87 module_param(use_inta, int, 0);
88 module_param(poll, int, 0);
89 module_param(debug, int, 0);
90 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
91 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
92 MODULE_PARM_DESC(poll, "use polling (for debug)");
93 MODULE_PARM_DESC(debug, "default debug msglevel");
96 module_param(use_multi, int, 0);
97 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
100 enum bnx2x_board_type {
106 /* indexed by board_type, above */
109 } board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
116 static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
126 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
128 /****************************************************************************
129 * General service functions
130 ****************************************************************************/
133 * locking is done by mcp
135 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140 PCICFG_VENDOR_ID_OFFSET);
143 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150 PCICFG_VENDOR_ID_OFFSET);
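
/* The two helpers above form a classic "window" access pattern - one
 * config-space register selects the GRC target address, another moves
 * the data, and the window is parked back at PCICFG_VENDOR_ID_OFFSET so
 * stray config cycles stay harmless.  A hypothetical read-modify-write
 * built on top of them would look like this (sketch only, never
 * compiled in; per the comment above, serialization is the MCP's job):
 */
#if 0
static void bnx2x_reg_rmw_ind(struct bnx2x *bp, u32 addr, u32 mask, u32 bits)
{
	u32 val = bnx2x_reg_rd_ind(bp, addr);	/* read through the window */

	val = (val & ~mask) | bits;		/* modify */
	bnx2x_reg_wr_ind(bp, addr, val);	/* write back the same way */
}
#endif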
155 static const u32 dmae_reg_go_c[] = {
156 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
162 /* copy command into DMAE command memory and set DMAE command go */
163 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
169 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
173 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
176 REG_WR(bp, dmae_reg_go_c[idx], 1);
179 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
182 struct dmae_command *dmae = &bp->init_dmae;
183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
186 if (!bp->dmae_ready) {
187 u32 *data = bnx2x_sp(bp, wb_data[0]);
189 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr, len32);
191 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
195 mutex_lock(&bp->dmae_mutex);
197 memset(dmae, 0, sizeof(struct dmae_command));
199 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
203 DMAE_CMD_ENDIANITY_B_DW_SWAP |
205 DMAE_CMD_ENDIANITY_DW_SWAP |
207 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
209 dmae->src_addr_lo = U64_LO(dma_addr);
210 dmae->src_addr_hi = U64_HI(dma_addr);
211 dmae->dst_addr_lo = dst_addr >> 2;
212 dmae->dst_addr_hi = 0;
214 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
216 dmae->comp_val = DMAE_COMP_VAL;
218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
231 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
235 while (*wb_comp != DMAE_COMP_VAL) {
236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
238 /* adjust delay for emulation/FPGA */
239 if (CHIP_REV_IS_SLOW(bp))
245 BNX2X_ERR("dmae timeout!\n");
251 mutex_unlock(&bp->dmae_mutex);
254 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
256 struct dmae_command *dmae = &bp->init_dmae;
257 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
260 if (!bp->dmae_ready) {
261 u32 *data = bnx2x_sp(bp, wb_data[0]);
264 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
265 " using indirect\n", src_addr, len32);
266 for (i = 0; i < len32; i++)
267 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
271 mutex_lock(&bp->dmae_mutex);
273 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
274 memset(dmae, 0, sizeof(struct dmae_command));
276 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
277 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
278 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
280 DMAE_CMD_ENDIANITY_B_DW_SWAP |
282 DMAE_CMD_ENDIANITY_DW_SWAP |
284 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
285 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
286 dmae->src_addr_lo = src_addr >> 2;
287 dmae->src_addr_hi = 0;
288 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
289 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
291 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
292 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
293 dmae->comp_val = DMAE_COMP_VAL;
295 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
296 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
297 "dst_addr [%x:%08x (%08x)]\n"
298 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
299 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
300 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
301 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
305 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
309 while (*wb_comp != DMAE_COMP_VAL) {
311 /* adjust delay for emulation/FPGA */
312 if (CHIP_REV_IS_SLOW(bp))
318 BNX2X_ERR("dmae timeout!\n");
323 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
324 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
325 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
327 mutex_unlock(&bp->dmae_mutex);
330 /* used only for slowpath so not inlined */
331 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
335 wb_write[0] = val_hi;
336 wb_write[1] = val_lo;
337 REG_WR_DMAE(bp, reg, wb_write, 2);
341 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
345 REG_RD_DMAE(bp, reg, wb_data, 2);
347 return HILO_U64(wb_data[0], wb_data[1]);
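
/* The wide-bus helpers move 64-bit values as two 32-bit words, high
 * word first.  A minimal round-trip sketch, assuming the usual
 * HILO_U64/U64_HI/U64_LO definitions from bnx2x.h (illustrative only):
 */
#if 0
	u64 v = 0x0000000100000002ULL;
	u32 hi = U64_HI(v);			/* 0x00000001 */
	u32 lo = U64_LO(v);			/* 0x00000002 */
	BUG_ON(HILO_U64(hi, lo) != v);		/* recomposes losslessly */
#endif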
351 static int bnx2x_mc_assert(struct bnx2x *bp)
355 u32 row0, row1, row2, row3;
358 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
359 XSTORM_ASSERT_LIST_INDEX_OFFSET);
361 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
363 /* print the asserts */
364 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
366 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i));
368 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
370 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
372 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
373 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
375 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
376 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
377 " 0x%08x 0x%08x 0x%08x\n",
378 i, row3, row2, row1, row0);
386 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
387 TSTORM_ASSERT_LIST_INDEX_OFFSET);
389 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
391 /* print the asserts */
392 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
394 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i));
396 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
398 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
400 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
401 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
403 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
404 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
405 " 0x%08x 0x%08x 0x%08x\n",
406 i, row3, row2, row1, row0);
414 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
415 CSTORM_ASSERT_LIST_INDEX_OFFSET);
417 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
419 /* print the asserts */
420 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
422 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i));
424 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
426 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
428 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
429 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
431 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
432 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
433 " 0x%08x 0x%08x 0x%08x\n",
434 i, row3, row2, row1, row0);
442 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
443 USTORM_ASSERT_LIST_INDEX_OFFSET);
445 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
447 /* print the asserts */
448 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
450 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i));
452 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 4);
454 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 8);
456 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
457 USTORM_ASSERT_LIST_OFFSET(i) + 12);
459 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
460 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
461 " 0x%08x 0x%08x 0x%08x\n",
462 i, row3, row2, row1, row0);
472 static void bnx2x_fw_dump(struct bnx2x *bp)
478 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
479 mark = ((mark + 0x3) & ~0x3);
480 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
482 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
483 for (word = 0; word < 8; word++)
484 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
487 printk(KERN_CONT "%s", (char *)data);
489 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
490 for (word = 0; word < 8; word++)
491 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
494 printk(KERN_CONT "%s", (char *)data);
496 printk("\n" KERN_ERR PFX "end of fw dump\n");
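
/* The mark arithmetic above uses the standard round-up-to-4 idiom:
 * (mark + 0x3) & ~0x3 clears the two low bits after stepping past any
 * partial word.  Illustrative values (not taken from real hardware):
 */
#if 0
	/* 0xf105 -> 0xf108; 0xf108 stays 0xf108 (already aligned) */
	u32 aligned = (mark + 0x3) & ~0x3;
#endif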
499 static void bnx2x_panic_dump(struct bnx2x *bp)
504 BNX2X_ERR("begin crash dump -----------------\n");
506 for_each_queue(bp, i) {
507 struct bnx2x_fastpath *fp = &bp->fp[i];
508 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
510 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
511 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
512 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
513 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
514 BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)"
515 " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)"
516 " rx_sge_prod(%x) last_max_sge(%x)\n",
517 fp->rx_comp_prod, fp->rx_comp_cons,
518 le16_to_cpu(*fp->rx_cons_sb),
519 le16_to_cpu(*fp->rx_bd_cons_sb),
520 fp->rx_sge_prod, fp->last_max_sge);
521 BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)"
522 " bd data(%x,%x) rx_alloc_failed(%lx)\n",
523 fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
524 hw_prods->bds_prod, fp->rx_alloc_failed);
526 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
527 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
528 for (j = start; j < end; j++) {
529 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
531 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
532 sw_bd->skb, sw_bd->first_bd);
535 start = TX_BD(fp->tx_bd_cons - 10);
536 end = TX_BD(fp->tx_bd_cons + 254);
537 for (j = start; j < end; j++) {
538 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
540 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
541 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
544 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
545 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
546 for (j = start; j < end; j++) {
547 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
548 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
550 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
551 j, rx_bd[1], rx_bd[0], sw_bd->skb);
555 end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
556 for (j = start; j < end; j++) {
557 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
558 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
560 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
561 j, rx_sge[1], rx_sge[0], sw_page->page);
564 start = RCQ_BD(fp->rx_comp_cons - 10);
565 end = RCQ_BD(fp->rx_comp_cons + 503);
566 for (j = start; j < end; j++) {
567 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
569 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
570 j, cqe[0], cqe[1], cqe[2], cqe[3]);
574 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
575 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
576 " spq_prod_idx(%u)\n",
577 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
578 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
582 BNX2X_ERR("end crash dump -----------------\n");
584 bp->stats_state = STATS_STATE_DISABLED;
585 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
588 static void bnx2x_int_enable(struct bnx2x *bp)
590 int port = BP_PORT(bp);
591 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
592 u32 val = REG_RD(bp, addr);
593 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
596 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
597 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
598 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
600 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
601 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
602 HC_CONFIG_0_REG_INT_LINE_EN_0 |
603 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
605 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
606 val, port, addr, msix);
608 REG_WR(bp, addr, val);
610 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
613 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
614 val, port, addr, msix);
616 REG_WR(bp, addr, val);
618 if (CHIP_IS_E1H(bp)) {
619 /* init leading/trailing edge */
621 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
623 /* enable nig attention */
628 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
629 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
633 static void bnx2x_int_disable(struct bnx2x *bp)
635 int port = BP_PORT(bp);
636 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
637 u32 val = REG_RD(bp, addr);
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
641 HC_CONFIG_0_REG_INT_LINE_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
644 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
647 REG_WR(bp, addr, val);
648 if (REG_RD(bp, addr) != val)
649 BNX2X_ERR("BUG! proper val not read from IGU!\n");
652 static void bnx2x_int_disable_sync(struct bnx2x *bp)
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657 /* disable interrupt handling */
658 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp);
662 /* make sure all ISRs are done */
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
670 synchronize_irq(bp->pdev->irq);
672 /* make sure sp_task is not running */
673 cancel_work_sync(&bp->sp_task);
679 * General service functions
682 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
683 u8 storm, u16 index, u8 op, u8 update)
685 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
686 struct igu_ack_register igu_ack;
688 igu_ack.status_block_index = index;
689 igu_ack.sb_id_and_flags =
690 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
691 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
692 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
693 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
695 DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
696 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
697 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
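
/* sb_id_and_flags packs four fields into the single 32-bit IGU write
 * above.  For example, acking status block 5 on the USTORM with
 * "update index and re-enable interrupts" would compose as follows
 * (illustrative values only, shifts per the IGU_ACK_REGISTER_*
 * definitions):
 */
#if 0
	igu_ack.sb_id_and_flags =
		((5 << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (USTORM_ID << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (1 << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (IGU_INT_ENABLE << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
#endif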
700 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
702 struct host_status_block *fpsb = fp->status_blk;
705 barrier(); /* status block is written to by the chip */
706 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
707 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
711 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
717 static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
719 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
721 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
724 if ((fp->rx_comp_cons != rx_cons_sb) ||
725 (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
726 (fp->tx_pkt_prod != fp->tx_pkt_cons))
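
/* The MAX_RCQ_DESC_CNT test above exists because the hardware index is
 * free-running while each ring page ends in a "next page" link element:
 * an index whose low bits select that last slot points at the link, not
 * at a real CQE, and must be stepped over.  Sketch of the adjustment
 * (illustrative; assumes a page holds MAX_RCQ_DESC_CNT real entries
 * plus the link):
 */
#if 0
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;		/* hop over the link element */
#endif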
732 static u16 bnx2x_ack_int(struct bnx2x *bp)
734 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
735 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
737 DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
738 result, BAR_IGU_INTMEM + igu_addr);
741 #warning IGU_DEBUG active
743 BNX2X_ERR("read %x from IGU\n", result);
744 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
752 * fast path service functions
755 /* free skb in the packet ring at pos idx
756 * return idx of last bd freed
758 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
761 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
762 struct eth_tx_bd *tx_bd;
763 struct sk_buff *skb = tx_buf->skb;
764 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
767 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
771 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
772 tx_bd = &fp->tx_desc_ring[bd_idx];
773 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
774 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
776 nbd = le16_to_cpu(tx_bd->nbd) - 1;
777 new_cons = nbd + tx_buf->first_bd;
778 #ifdef BNX2X_STOP_ON_ERROR
779 if (nbd > (MAX_SKB_FRAGS + 2)) {
780 BNX2X_ERR("BAD nbd!\n");
785 /* Skip a parse bd and the TSO split header bd
786 since they have no mapping */
788 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
790 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
791 ETH_TX_BD_FLAGS_TCP_CSUM |
792 ETH_TX_BD_FLAGS_SW_LSO)) {
794 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
795 tx_bd = &fp->tx_desc_ring[bd_idx];
796 /* is this a TSO split header bd? */
797 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
799 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
806 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
807 tx_bd = &fp->tx_desc_ring[bd_idx];
808 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
809 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
811 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
817 tx_buf->first_bd = 0;
823 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
829 barrier(); /* Tell compiler that prod and cons can change */
830 prod = fp->tx_bd_prod;
831 cons = fp->tx_bd_cons;
833 /* NUM_TX_RINGS = number of "next-page" entries
834 It will be used as a threshold */
835 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
837 #ifdef BNX2X_STOP_ON_ERROR
839 WARN_ON(used > fp->bp->tx_ring_size);
840 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
843 return (s16)(fp->bp->tx_ring_size) - used;
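
/* The availability math relies on free-running 16-bit indices:
 * SUB_S16() yields the correct occupancy even across the 0xffff -> 0
 * wrap.  Worked sketch, assuming SUB_S16 is the usual signed-difference
 * macro from bnx2x.h (illustrative only):
 */
#if 0
	u16 prod = 0x0005, cons = 0xfffb;	/* prod has wrapped */
	s16 used = SUB_S16(prod, cons);		/* 10, not -65526 */
#endif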
846 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
848 struct bnx2x *bp = fp->bp;
849 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
852 #ifdef BNX2X_STOP_ON_ERROR
853 if (unlikely(bp->panic))
857 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
858 sw_cons = fp->tx_pkt_cons;
860 while (sw_cons != hw_cons) {
863 pkt_cons = TX_BD(sw_cons);
865 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
867 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
868 hw_cons, sw_cons, pkt_cons);
870 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
872 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
875 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
883 fp->tx_pkt_cons = sw_cons;
884 fp->tx_bd_cons = bd_cons;
886 /* Need to make the tx_cons update visible to start_xmit()
887 * before checking for netif_queue_stopped(). Without the
888 * memory barrier, there is a small possibility that start_xmit()
889 * will miss it and cause the queue to be stopped forever.
893 /* TBD need a thresh? */
894 if (unlikely(netif_queue_stopped(bp->dev))) {
896 netif_tx_lock(bp->dev);
898 if (netif_queue_stopped(bp->dev) &&
899 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
900 netif_wake_queue(bp->dev);
902 netif_tx_unlock(bp->dev);
906 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
907 union eth_rx_cqe *rr_cqe)
909 struct bnx2x *bp = fp->bp;
910 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
911 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
914 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
915 FP_IDX(fp), cid, command, bp->state,
916 rr_cqe->ramrod_cqe.ramrod_type);
921 switch (command | fp->state) {
922 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
923 BNX2X_FP_STATE_OPENING):
924 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
926 fp->state = BNX2X_FP_STATE_OPEN;
929 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
930 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
932 fp->state = BNX2X_FP_STATE_HALTED;
936 BNX2X_ERR("unexpected MC reply (%d) "
937 "fp->state is %x\n", command, fp->state);
940 mb(); /* force bnx2x_wait_ramrod() to see the change */
944 switch (command | bp->state) {
945 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
946 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
947 bp->state = BNX2X_STATE_OPEN;
950 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
951 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
952 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
953 fp->state = BNX2X_FP_STATE_HALTED;
956 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
957 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
958 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
961 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
962 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
963 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
964 bp->set_mac_pending = 0;
967 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
968 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
972 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
976 mb(); /* force bnx2x_wait_ramrod() to see the change */
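
/* The dispatch above keys on (command | state): ramrod command IDs and
 * the BNX2X_*_STATE values are assumed to occupy disjoint bit ranges,
 * so each case only fires when the MC reply matches the state we were
 * waiting in.  A compile-time sketch of that assumption (illustrative,
 * not in the original sources):
 */
#if 0
	BUILD_BUG_ON(RAMROD_CMD_ID_ETH_HALT & BNX2X_FP_STATE_HALTING);
#endif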
979 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
980 struct bnx2x_fastpath *fp, u16 index)
982 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
983 struct page *page = sw_buf->page;
984 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
986 /* Skip "next page" elements */
990 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
991 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
992 __free_pages(page, PAGES_PER_SGE_SHIFT);
999 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1000 struct bnx2x_fastpath *fp, int last)
1004 for (i = 0; i < last; i++)
1005 bnx2x_free_rx_sge(bp, fp, i);
1008 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1009 struct bnx2x_fastpath *fp, u16 index)
1011 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1012 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1013 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1016 if (unlikely(page == NULL))
1019 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1020 PCI_DMA_FROMDEVICE);
1021 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1022 __free_pages(page, PAGES_PER_SGE_SHIFT);
1026 sw_buf->page = page;
1027 pci_unmap_addr_set(sw_buf, mapping, mapping);
1029 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1030 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1035 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1036 struct bnx2x_fastpath *fp, u16 index)
1038 struct sk_buff *skb;
1039 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1040 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1043 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1044 if (unlikely(skb == NULL))
1047 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1048 PCI_DMA_FROMDEVICE);
1049 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1055 pci_unmap_addr_set(rx_buf, mapping, mapping);
1057 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1058 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1063 /* note that we are not allocating a new skb,
1064 * we are just moving one from cons to prod
1065 * we are not creating a new mapping,
1066 * so there is no need to check for dma_mapping_error().
1068 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1069 struct sk_buff *skb, u16 cons, u16 prod)
1071 struct bnx2x *bp = fp->bp;
1072 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1073 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1074 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1075 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1077 pci_dma_sync_single_for_device(bp->pdev,
1078 pci_unmap_addr(cons_rx_buf, mapping),
1079 bp->rx_offset + RX_COPY_THRESH,
1080 PCI_DMA_FROMDEVICE);
1082 prod_rx_buf->skb = cons_rx_buf->skb;
1083 pci_unmap_addr_set(prod_rx_buf, mapping,
1084 pci_unmap_addr(cons_rx_buf, mapping));
1085 *prod_bd = *cons_bd;
1088 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1091 u16 last_max = fp->last_max_sge;
1093 if (SUB_S16(idx, last_max) > 0)
1094 fp->last_max_sge = idx;
1097 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1101 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1102 int idx = RX_SGE_CNT * i - 1;
1104 for (j = 0; j < 2; j++) {
1105 SGE_MASK_CLEAR_BIT(fp, idx);
1111 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1112 struct eth_fast_path_rx_cqe *fp_cqe)
1114 struct bnx2x *bp = fp->bp;
1115 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1116 le16_to_cpu(fp_cqe->len_on_bd)) >>
1118 u16 last_max, last_elem, first_elem;
1125 /* First mark all used pages */
1126 for (i = 0; i < sge_len; i++)
1127 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1129 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1130 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1132 /* Here we assume that the last SGE index is the biggest */
1133 prefetch((void *)(fp->sge_mask));
1134 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1136 last_max = RX_SGE(fp->last_max_sge);
1137 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1138 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1140 /* If ring is not full */
1141 if (last_elem + 1 != first_elem)
1144 /* Now update the prod */
1145 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1146 if (likely(fp->sge_mask[i]))
1149 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1150 delta += RX_SGE_MASK_ELEM_SZ;
1154 fp->rx_sge_prod += delta;
1155 /* clear page-end entries */
1156 bnx2x_clear_sge_mask_next_elems(fp);
1159 DP(NETIF_MSG_RX_STATUS,
1160 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1161 fp->last_max_sge, fp->rx_sge_prod);
1164 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1166 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1167 memset(fp->sge_mask, 0xff,
1168 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1170 /* Clear the two last indices in the page to 1:
1171 these are the indices that correspond to the "next" element,
1172 hence will never be indicated and should be removed from
1173 the calculations. */
1174 bnx2x_clear_sge_mask_next_elems(fp);
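
/* SGE mask bookkeeping in brief: each u64 of fp->sge_mask covers
 * RX_SGE_MASK_ELEM_SZ ring entries, and a set bit means "not yet
 * consumed".  The CQE handler clears bits as the hardware reports SGEs
 * used, and bnx2x_update_sge_prod() recycles an element only once it
 * reads back as all zeroes.  Sketch of the recycle step (illustrative):
 */
#if 0
	if (fp->sge_mask[i] == 0) {		/* every SGE consumed */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		fp->rx_sge_prod += RX_SGE_MASK_ELEM_SZ;
	}
#endif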
1177 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1178 struct sk_buff *skb, u16 cons, u16 prod)
1180 struct bnx2x *bp = fp->bp;
1181 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1182 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1183 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1186 /* move empty skb from pool to prod and map it */
1187 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1188 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1189 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1190 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1192 /* move partial skb from cons to pool (don't unmap yet) */
1193 fp->tpa_pool[queue] = *cons_rx_buf;
1195 /* mark bin state as start - print error if current state != stop */
1196 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1197 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1199 fp->tpa_state[queue] = BNX2X_TPA_START;
1201 /* point prod_bd to new skb */
1202 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1203 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1205 #ifdef BNX2X_STOP_ON_ERROR
1206 fp->tpa_queue_used |= (1 << queue);
1207 #ifdef __powerpc64__
1208 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1210 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1212 fp->tpa_queue_used);
1216 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1217 struct sk_buff *skb,
1218 struct eth_fast_path_rx_cqe *fp_cqe,
1221 struct sw_rx_page *rx_pg, old_rx_pg;
1223 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1224 u32 i, frag_len, frag_size, pages;
1228 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1229 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1231 /* This is needed in order to enable forwarding support */
1233 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1234 max(frag_size, (u32)len_on_bd));
1236 #ifdef BNX2X_STOP_ON_ERROR
1237 if (pages > 8*PAGES_PER_SGE) {
1238 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1240 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1241 fp_cqe->pkt_len, len_on_bd);
1247 /* Run through the SGL and compose the fragmented skb */
1248 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1249 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1251 /* FW gives the indices of the SGE as if the ring is an array
1252 (meaning that "next" element will consume 2 indices) */
1253 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1254 rx_pg = &fp->rx_page_ring[sge_idx];
1258 /* If we fail to allocate a substitute page, we simply stop
1259 where we are and drop the whole packet */
1260 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1261 if (unlikely(err)) {
1262 fp->rx_alloc_failed++;
1266 /* Unmap the page as we are going to pass it to the stack */
1267 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1268 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1270 /* Add one frag and update the appropriate fields in the skb */
1271 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1273 skb->data_len += frag_len;
1274 skb->truesize += frag_len;
1275 skb->len += frag_len;
1277 frag_size -= frag_len;
1283 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1284 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1287 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1288 struct sk_buff *skb = rx_buf->skb;
1290 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1292 /* Unmap skb in the pool anyway, as we are going to change
1293 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */
1295 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1296 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1298 /* if alloc failed drop the packet and keep the buffer in the bin */
1299 if (likely(new_skb)) {
1302 prefetch(((char *)(skb)) + 128);
1304 /* else fix ip xsum and give it to the stack */
1305 /* (no need to map the new skb) */
1306 #ifdef BNX2X_STOP_ON_ERROR
1307 if (pad + len > bp->rx_buf_size) {
1308 BNX2X_ERR("skb_put is about to fail... "
1309 "pad %d len %d rx_buf_size %d\n",
1310 pad, len, bp->rx_buf_size);
1316 skb_reserve(skb, pad);
1319 skb->protocol = eth_type_trans(skb, bp->dev);
1320 skb->ip_summed = CHECKSUM_UNNECESSARY;
1325 iph = (struct iphdr *)skb->data;
1327 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1330 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1331 &cqe->fast_path_cqe, cqe_idx)) {
1333 if ((bp->vlgrp != NULL) &&
1334 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1335 PARSING_FLAGS_VLAN))
1336 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1337 le16_to_cpu(cqe->fast_path_cqe.
1341 netif_receive_skb(skb);
1343 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1344 " - dropping packet!\n");
1348 bp->dev->last_rx = jiffies;
1350 /* put new skb in bin */
1351 fp->tpa_pool[queue].skb = new_skb;
1354 DP(NETIF_MSG_RX_STATUS,
1355 "Failed to allocate new skb - dropping packet!\n");
1356 fp->rx_alloc_failed++;
1359 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1362 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1363 struct bnx2x_fastpath *fp,
1364 u16 bd_prod, u16 rx_comp_prod,
1367 struct tstorm_eth_rx_producers rx_prods = {0};
1370 /* Update producers */
1371 rx_prods.bd_prod = bd_prod;
1372 rx_prods.cqe_prod = rx_comp_prod;
1373 rx_prods.sge_prod = rx_sge_prod;
1375 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1376 REG_WR(bp, BAR_TSTRORM_INTMEM +
1377 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1378 ((u32 *)&rx_prods)[i]);
1380 DP(NETIF_MSG_RX_STATUS,
1381 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1382 bd_prod, rx_comp_prod, rx_sge_prod);
1385 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1387 struct bnx2x *bp = fp->bp;
1388 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1389 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1393 #ifdef BNX2X_STOP_ON_ERROR
1394 if (unlikely(bp->panic))
1398 /* CQ "next element" is of the size of the regular element,
1399 that's why it's ok here */
1400 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1401 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1404 bd_cons = fp->rx_bd_cons;
1405 bd_prod = fp->rx_bd_prod;
1406 bd_prod_fw = bd_prod;
1407 sw_comp_cons = fp->rx_comp_cons;
1408 sw_comp_prod = fp->rx_comp_prod;
1410 /* Memory barrier necessary as speculative reads of the rx
1411 * buffer can be ahead of the index in the status block
1415 DP(NETIF_MSG_RX_STATUS,
1416 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1417 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1419 while (sw_comp_cons != hw_comp_cons) {
1420 struct sw_rx_bd *rx_buf = NULL;
1421 struct sk_buff *skb;
1422 union eth_rx_cqe *cqe;
1426 comp_ring_cons = RCQ_BD(sw_comp_cons);
1427 bd_prod = RX_BD(bd_prod);
1428 bd_cons = RX_BD(bd_cons);
1430 cqe = &fp->rx_comp_ring[comp_ring_cons];
1431 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1433 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1434 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1435 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1436 cqe->fast_path_cqe.rss_hash_result,
1437 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1438 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1440 /* is this a slowpath msg? */
1441 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1442 bnx2x_sp_event(fp, cqe);
1445 /* this is an rx packet */
1447 rx_buf = &fp->rx_buf_ring[bd_cons];
1449 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1450 pad = cqe->fast_path_cqe.placement_offset;
1452 /* If CQE is marked both TPA_START and TPA_END
1453 it is a non-TPA CQE */
1454 if ((!fp->disable_tpa) &&
1455 (TPA_TYPE(cqe_fp_flags) !=
1456 (TPA_TYPE_START | TPA_TYPE_END))) {
1457 queue = cqe->fast_path_cqe.queue_index;
1459 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1460 DP(NETIF_MSG_RX_STATUS,
1461 "calling tpa_start on queue %d\n",
1464 bnx2x_tpa_start(fp, queue, skb,
1469 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1470 DP(NETIF_MSG_RX_STATUS,
1471 "calling tpa_stop on queue %d\n",
1474 if (!BNX2X_RX_SUM_FIX(cqe))
1475 BNX2X_ERR("STOP on non-TCP "
1478 /* This is the size of the linear data
1480 len = le16_to_cpu(cqe->fast_path_cqe.
1482 bnx2x_tpa_stop(bp, fp, queue, pad,
1483 len, cqe, comp_ring_cons);
1484 #ifdef BNX2X_STOP_ON_ERROR
1489 bnx2x_update_sge_prod(fp,
1490 &cqe->fast_path_cqe);
1495 pci_dma_sync_single_for_device(bp->pdev,
1496 pci_unmap_addr(rx_buf, mapping),
1497 pad + RX_COPY_THRESH,
1498 PCI_DMA_FROMDEVICE);
1500 prefetch(((char *)(skb)) + 128);
1502 /* is this an error packet? */
1503 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1504 /* do we sometimes forward error packets anyway? */
1505 DP(NETIF_MSG_RX_ERR,
1506 "ERROR flags %x rx packet %u\n",
1507 cqe_fp_flags, sw_comp_cons);
1508 /* TBD make sure MC counts this as a drop */
1512 /* Since we don't have a jumbo ring
1513 * copy small packets if mtu > 1500
1515 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1516 (len <= RX_COPY_THRESH)) {
1517 struct sk_buff *new_skb;
1519 new_skb = netdev_alloc_skb(bp->dev,
1521 if (new_skb == NULL) {
1522 DP(NETIF_MSG_RX_ERR,
1523 "ERROR packet dropped "
1524 "because of alloc failure\n");
1525 fp->rx_alloc_failed++;
1530 skb_copy_from_linear_data_offset(skb, pad,
1531 new_skb->data + pad, len);
1532 skb_reserve(new_skb, pad);
1533 skb_put(new_skb, len);
1535 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1539 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1540 pci_unmap_single(bp->pdev,
1541 pci_unmap_addr(rx_buf, mapping),
1542 bp->rx_buf_use_size,
1543 PCI_DMA_FROMDEVICE);
1544 skb_reserve(skb, pad);
1548 DP(NETIF_MSG_RX_ERR,
1549 "ERROR packet dropped because "
1550 "of alloc failure\n");
1551 fp->rx_alloc_failed++;
1553 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1557 skb->protocol = eth_type_trans(skb, bp->dev);
1559 skb->ip_summed = CHECKSUM_NONE;
1560 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1563 /* TBD do we pass bad csum packets in promisc */
1567 if ((bp->vlgrp != NULL) &&
1568 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1569 PARSING_FLAGS_VLAN))
1570 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1571 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1574 netif_receive_skb(skb);
1576 bp->dev->last_rx = jiffies;
1581 bd_cons = NEXT_RX_IDX(bd_cons);
1582 bd_prod = NEXT_RX_IDX(bd_prod);
1583 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1586 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1587 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1589 if (rx_pkt == budget)
1593 fp->rx_bd_cons = bd_cons;
1594 fp->rx_bd_prod = bd_prod_fw;
1595 fp->rx_comp_cons = sw_comp_cons;
1596 fp->rx_comp_prod = sw_comp_prod;
1598 /* Update producers */
1599 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1601 mmiowb(); /* keep prod updates ordered */
1603 fp->rx_pkt += rx_pkt;
1609 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1611 struct bnx2x_fastpath *fp = fp_cookie;
1612 struct bnx2x *bp = fp->bp;
1613 struct net_device *dev = bp->dev;
1614 int index = FP_IDX(fp);
1616 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1617 index, FP_SB_ID(fp));
1618 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1620 #ifdef BNX2X_STOP_ON_ERROR
1621 if (unlikely(bp->panic))
1625 prefetch(fp->rx_cons_sb);
1626 prefetch(fp->tx_cons_sb);
1627 prefetch(&fp->status_blk->c_status_block.status_block_index);
1628 prefetch(&fp->status_blk->u_status_block.status_block_index);
1630 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1635 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1637 struct net_device *dev = dev_instance;
1638 struct bnx2x *bp = netdev_priv(dev);
1639 u16 status = bnx2x_ack_int(bp);
1642 /* Return here if interrupt is shared and it's not for us */
1643 if (unlikely(status == 0)) {
1644 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1647 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1649 #ifdef BNX2X_STOP_ON_ERROR
1650 if (unlikely(bp->panic))
1654 /* Return here if interrupt is disabled */
1655 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1656 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1660 mask = 0x2 << bp->fp[0].sb_id;
1661 if (status & mask) {
1662 struct bnx2x_fastpath *fp = &bp->fp[0];
1664 prefetch(fp->rx_cons_sb);
1665 prefetch(fp->tx_cons_sb);
1666 prefetch(&fp->status_blk->c_status_block.status_block_index);
1667 prefetch(&fp->status_blk->u_status_block.status_block_index);
1669 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1675 if (unlikely(status & 0x1)) {
1676 schedule_work(&bp->sp_task);
1684 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1690 /* end of fast path */
1692 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1697 * General service functions
1700 static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1703 u32 resource_bit = (1 << resource);
1704 u8 port = BP_PORT(bp);
1707 /* Validating that the resource is within range */
1708 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1710 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1711 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1715 /* Validating that the resource is not already taken */
1716 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1717 if (lock_status & resource_bit) {
1718 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1719 lock_status, resource_bit);
1723 /* Try for 1 second every 5ms */
1724 for (cnt = 0; cnt < 200; cnt++) {
1725 /* Try to acquire the lock */
1726 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
1728 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1729 if (lock_status & resource_bit)
1734 DP(NETIF_MSG_HW, "Timeout\n");
1738 static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1741 u32 resource_bit = (1 << resource);
1742 u8 port = BP_PORT(bp);
1744 /* Validating that the resource is within range */
1745 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1747 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1748 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1752 /* Validating that the resource is currently taken */
1753 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1754 if (!(lock_status & resource_bit)) {
1755 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1756 lock_status, resource_bit);
1760 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
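
/* Typical use of the lock/unlock pair above - take the resource, touch
 * the shared hardware, release.  Sketch only, assuming the usual
 * 0-on-success return convention:
 */
#if 0
	if (bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO))
		return;				/* lock busy or timed out */
	/* ... access the shared GPIO register ... */
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
#endif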
1764 /* HW Lock for shared dual port PHYs */
1765 static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1767 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1769 mutex_lock(&bp->port.phy_mutex);
1771 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1772 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1773 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1776 static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
1778 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1780 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1781 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1782 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1784 mutex_unlock(&bp->port.phy_mutex);
1787 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1789 /* The GPIO should be swapped if swap register is set and active */
1790 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1791 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
1792 int gpio_shift = gpio_num +
1793 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1794 u32 gpio_mask = (1 << gpio_shift);
1797 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1798 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1802 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1803 /* read GPIO and mask except the float bits */
1804 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1807 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1808 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1809 gpio_num, gpio_shift);
1810 /* clear FLOAT and set CLR */
1811 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1812 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1815 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1816 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1817 gpio_num, gpio_shift);
1818 /* clear FLOAT and set SET */
1819 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1820 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1823 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1824 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1825 gpio_num, gpio_shift);
1827 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1834 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1835 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
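
/* GPIO addressing, worked through: with the port-swap logic above,
 * GPIO 2 on the swapped port lands at bit
 * (2 + MISC_REGISTERS_GPIO_PORT_SHIFT) of the shared register, and the
 * FLOAT/SET/CLR command fields are reached by shifting the same mask by
 * the corresponding *_POS offsets.  Illustrative fragment:
 */
#if 0
	int gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT;
	u32 gpio_mask = (1 << gpio_shift);

	gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);	/* drive high */
#endif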
1840 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1842 u32 spio_mask = (1 << spio_num);
1845 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1846 (spio_num > MISC_REGISTERS_SPIO_7)) {
1847 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1851 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1852 /* read SPIO and mask except the float bits */
1853 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1856 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1857 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1858 /* clear FLOAT and set CLR */
1859 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1860 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1863 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1864 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1865 /* clear FLOAT and set SET */
1866 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1867 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1870 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1871 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1873 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1880 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1881 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1886 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1888 switch (bp->link_vars.ieee_fc) {
1889 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1890 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1893 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1894 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1897 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1898 bp->port.advertising |= ADVERTISED_Asym_Pause;
1901 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1907 static void bnx2x_link_report(struct bnx2x *bp)
1909 if (bp->link_vars.link_up) {
1910 if (bp->state == BNX2X_STATE_OPEN)
1911 netif_carrier_on(bp->dev);
1912 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1914 printk("%d Mbps ", bp->link_vars.line_speed);
1916 if (bp->link_vars.duplex == DUPLEX_FULL)
1917 printk("full duplex");
1919 printk("half duplex");
1921 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1922 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1923 printk(", receive ");
1924 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1925 printk("& transmit ");
1927 printk(", transmit ");
1929 printk("flow control ON");
1933 } else { /* link_down */
1934 netif_carrier_off(bp->dev);
1935 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1939 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1941 if (!BP_NOMCP(bp)) {
1944 /* Initialize link parameters structure variables */
1945 bp->link_params.mtu = bp->dev->mtu;
1947 bnx2x_phy_hw_lock(bp);
1948 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1949 bnx2x_phy_hw_unlock(bp);
1951 if (bp->link_vars.link_up)
1952 bnx2x_link_report(bp);
1954 bnx2x_calc_fc_adv(bp);
1958 BNX2X_ERR("Bootcode is missing - not initializing link\n");
1962 static void bnx2x_link_set(struct bnx2x *bp)
1964 if (!BP_NOMCP(bp)) {
1965 bnx2x_phy_hw_lock(bp);
1966 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1967 bnx2x_phy_hw_unlock(bp);
1969 bnx2x_calc_fc_adv(bp);
1971 BNX2X_ERR("Bootcode is missing - not setting link\n");
1974 static void bnx2x__link_reset(struct bnx2x *bp)
1976 if (!BP_NOMCP(bp)) {
1977 bnx2x_phy_hw_lock(bp);
1978 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1979 bnx2x_phy_hw_unlock(bp);
1981 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1984 static u8 bnx2x_link_test(struct bnx2x *bp)
1988 bnx2x_phy_hw_lock(bp);
1989 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1990 bnx2x_phy_hw_unlock(bp);
1995 /* Calculates the sum of vn_min_rates.
1996 It's needed for further normalizing of the min_rates.
2001 0 - if all the min_rates are 0.
2002 In the latter case the fairness algorithm should be deactivated.
2003 If not all min_rates are zero then those that are zeroes will
2006 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2008 int i, port = BP_PORT(bp);
2012 for (i = 0; i < E1HVN_MAX; i++) {
2014 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2015 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2016 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2017 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2018 /* If min rate is zero - set it to 1 */
2020 vn_min_rate = DEF_MIN_RATE;
2024 wsum += vn_min_rate;
2028 /* ... only if all min rates are zeros - disable FAIRNESS */
2035 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2038 struct cmng_struct_per_port *m_cmng_port)
2040 u32 r_param = port_rate / 8;
2041 int port = BP_PORT(bp);
2044 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2046 /* Enable minmax only if we are in e1hmf mode */
2048 u32 fair_periodic_timeout_usec;
2051 /* Enable rate shaping and fairness */
2052 m_cmng_port->flags.cmng_vn_enable = 1;
2053 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2054 m_cmng_port->flags.rate_shaping_enable = 1;
2057 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2058 " fairness will be disabled\n");
2060 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2061 m_cmng_port->rs_vars.rs_periodic_timeout =
2062 RS_PERIODIC_TIMEOUT_USEC / 4;
2064 /* this is the threshold below which no timer arming will occur
2065 1.25 coefficient is for the threshold to be a little bigger
2066 than the real time, to compensate for timer inaccuracy */
2067 m_cmng_port->rs_vars.rs_threshold =
2068 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2070 /* resolution of fairness timer */
2071 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2072 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2073 t_fair = T_FAIR_COEF / port_rate;
2075 /* this is the threshold below which we won't arm
2076 the timer anymore */
2077 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2079 /* we multiply by 1e3/8 to get bytes/msec.
2080 We don't want the credits to pass a credit
2081 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2082 m_cmng_port->fair_vars.upper_bound =
2083 r_param * t_fair * FAIR_MEM;
2084 /* since each tick is 4 usec */
2085 m_cmng_port->fair_vars.fairness_timeout =
2086 fair_periodic_timeout_usec / 4;
2089 /* Disable rate shaping and fairness */
2090 m_cmng_port->flags.cmng_vn_enable = 0;
2091 m_cmng_port->flags.fairness_enable = 0;
2092 m_cmng_port->flags.rate_shaping_enable = 0;
2095 "Single function mode minmax will be disabled\n");
2098 /* Store it to internal memory */
2099 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2100 REG_WR(bp, BAR_XSTRORM_INTMEM +
2101 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2102 ((u32 *)(m_cmng_port))[i]);
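
/* Fairness-timer arithmetic, spelled out with the in-code constants
 * (sketch; T_FAIR_COEF itself is defined in bnx2x.h):
 */
#if 0
	u32 t_fair_10g = T_FAIR_COEF / 10000;	/* 1000 usec on a 10G port */
	u32 t_fair_1g  = T_FAIR_COEF / 1000;	/* 10000 usec on a 1G port */
	/* both timeouts are then divided by 4 because each SDM tick
	 * is 4 usec */
#endif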
2105 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2106 u32 wsum, u16 port_rate,
2107 struct cmng_struct_per_port *m_cmng_port)
2109 struct rate_shaping_vars_per_vn m_rs_vn;
2110 struct fairness_vars_per_vn m_fair_vn;
2111 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2112 u16 vn_min_rate, vn_max_rate;
2115 /* If function is hidden - set min and max to zeroes */
2116 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2121 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2122 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2123 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2124 if current min rate is zero - set it to 1.
2125 This is a requirement of the algorithm. */
2126 if ((vn_min_rate == 0) && wsum)
2127 vn_min_rate = DEF_MIN_RATE;
2128 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2129 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2132 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2133 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2135 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2136 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2138 /* global vn counter - maximal Mbps for this vn */
2139 m_rs_vn.vn_counter.rate = vn_max_rate;
2141 /* quota - number of bytes transmitted in this period */
2142 m_rs_vn.vn_counter.quota =
2143 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2145 #ifdef BNX2X_PER_PROT_QOS
2146 /* per protocol counter */
2147 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2148 /* maximal Mbps for this protocol */
2149 m_rs_vn.protocol_counters[protocol].rate =
2150 protocol_max_rate[protocol];
2151 /* the quota in each timer period -
2152 number of bytes transmitted in this period */
2153 m_rs_vn.protocol_counters[protocol].quota =
2154 (u32)(rs_periodic_timeout_usec *
2156 protocol_counters[protocol].rate/8));
2161 /* credit for each period of the fairness algorithm:
2162 number of bytes in T_FAIR (the vn share the port rate).
2163 wsum should not be larger than 10000, thus
2164 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2165 m_fair_vn.vn_credit_delta =
2166 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2167 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2168 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2169 m_fair_vn.vn_credit_delta);
2172 #ifdef BNX2X_PER_PROT_QOS
2174 u32 protocolWeightSum = 0;
2176 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2177 protocolWeightSum +=
2178 drvInit.protocol_min_rate[protocol];
2179 /* per protocol counter -
2180 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2181 if (protocolWeightSum > 0) {
2183 protocol < NUM_OF_PROTOCOLS; protocol++)
2184 /* credit for each period of the
2185 fairness algorithm - number of bytes in
2186 T_FAIR (the protocol share the vn rate) */
2187 m_fair_vn.protocol_credit_delta[protocol] =
2188 (u32)((vn_min_rate / 8) * t_fair *
2189 protocol_min_rate / protocolWeightSum);
2194 /* Store it to internal memory */
2195 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2196 REG_WR(bp, BAR_XSTRORM_INTMEM +
2197 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2198 ((u32 *)(&m_rs_vn))[i]);
2200 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2201 REG_WR(bp, BAR_XSTRORM_INTMEM +
2202 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2203 ((u32 *)(&m_fair_vn))[i]);
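
/* Credit arithmetic in brief: per fairness period a vN earns
 * vn_min_rate * (T_FAIR_COEF / (8 * wsum)) bytes, floored at twice the
 * fair_threshold so the counter never under-runs the algorithm's
 * resolution.  Illustrative numbers (assumed, not from the spec):
 */
#if 0
	u32 wsum = 10000, vn_min_rate = 2500;	/* vN owns 1/4 of the min bw */
	u64 delta = (u64)vn_min_rate * (T_FAIR_COEF / (8 * wsum));
#endif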
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
    /* Make sure that we are synced with the current statistics */
    bnx2x_stats_handle(bp, STATS_EVENT_STOP);

    bnx2x_phy_hw_lock(bp);
    bnx2x_link_update(&bp->link_params, &bp->link_vars);
    bnx2x_phy_hw_unlock(bp);

    if (bp->link_vars.link_up) {

        if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
            struct host_port_stats *pstats;

            pstats = bnx2x_sp(bp, port_stats);
            /* reset old bmac stats */
            memset(&(pstats->mac_stx[0]), 0,
                   sizeof(struct mac_stx));
        }

        if ((bp->state == BNX2X_STATE_OPEN) ||
            (bp->state == BNX2X_STATE_DISABLED))
            bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
    }

    /* indicate link status */
    bnx2x_link_report(bp);

    if (IS_E1HMF(bp)) {
        int func;
        int vn;

        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
            if (vn == BP_E1HVN(bp))
                continue;

            func = ((vn << 1) | BP_PORT(bp));

            /* Set the attention towards other drivers
               on the same port */
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                   (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
    }

    if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
        struct cmng_struct_per_port m_cmng_port;
        u32 wsum;
        int vn;
        int port = BP_PORT(bp);

        /* Init RATE SHAPING and FAIRNESS contexts */
        wsum = bnx2x_calc_vn_wsum(bp);
        bnx2x_init_port_minmax(bp, (int)wsum,
                               bp->link_vars.line_speed,
                               &m_cmng_port);
        if (IS_E1HMF(bp))
            for (vn = VN_0; vn < E1HVN_MAX; vn++)
                bnx2x_init_vn_minmax(bp, 2*vn + port,
                                     wsum, bp->link_vars.line_speed,
                                     &m_cmng_port);
    }
}
static void bnx2x__link_status_update(struct bnx2x *bp)
{
    if (bp->state != BNX2X_STATE_OPEN)
        return;

    bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

    if (bp->link_vars.link_up)
        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
    else
        bnx2x_stats_handle(bp, STATS_EVENT_STOP);

    /* indicate link status */
    bnx2x_link_report(bp);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
    int port = BP_PORT(bp);
    u32 val;

    bp->port.pmf = 1;
    DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

    /* enable nig attention */
    val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
    REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
    REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

    bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
/****************************************************************************
* General service functions
****************************************************************************/

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                         u32 data_hi, u32 data_lo, int common)
{
    int func = BP_FUNC(bp);

    DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
       "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
       (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
       (void *)bp->spq_prod_bd - (void *)bp->spq), command,
       HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return -EIO;
#endif

    spin_lock_bh(&bp->spq_lock);

    if (!bp->spq_left) {
        BNX2X_ERR("BUG! SPQ ring full!\n");
        spin_unlock_bh(&bp->spq_lock);
        return -EBUSY;
    }

    /* CID needs port number to be encoded in it */
    bp->spq_prod_bd->hdr.conn_and_cmd_data =
        cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
                     HW_CID(bp, cid)));
    bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
    if (common)
        bp->spq_prod_bd->hdr.type |=
            cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

    bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
    bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

    bp->spq_left--;

    if (bp->spq_prod_bd == bp->spq_last_bd) {
        bp->spq_prod_bd = bp->spq;
        bp->spq_prod_idx = 0;
        DP(NETIF_MSG_TIMER, "end of spq\n");

    } else {
        bp->spq_prod_bd++;
        bp->spq_prod_idx++;
    }

    REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
           bp->spq_prod_idx);

    spin_unlock_bh(&bp->spq_lock);
    return 0;
}
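/*
 * Illustrative sketch (not part of the driver): the producer-pointer
 * wraparound used by the slow path queue above, reduced to a plain array.
 * All names here are made up for illustration.
 */
struct example_ring {
    int *first;             /* first element of the ring    */
    int *last;              /* last element of the ring     */
    int *prod;              /* current producer position    */
    unsigned int prod_idx;  /* producer index written to HW */
};

static void example_ring_advance(struct example_ring *r)
{
    if (r->prod == r->last) {   /* hit the end: wrap to start */
        r->prod = r->first;
        r->prod_idx = 0;
    } else {
        r->prod++;
        r->prod_idx++;
    }
}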
/* acquire split MCP access lock register */
static int bnx2x_lock_alr(struct bnx2x *bp)
{
    u32 i = 100, j, val;
    int rc = 0;

    for (j = 0; j < i*10; j++) {
        val = (1UL << 31);
        REG_WR(bp, GRCBASE_MCP + 0x9c, val);
        val = REG_RD(bp, GRCBASE_MCP + 0x9c);
        if (val & (1L << 31))
            break;

        msleep(5);
    }
    if (!(val & (1L << 31))) {
        BNX2X_ERR("Cannot acquire MCP access lock register\n");
        rc = -EBUSY;
    }

    return rc;
}

/* Release split MCP access lock register */
static void bnx2x_unlock_alr(struct bnx2x *bp)
{
    u32 val = 0;

    REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
    struct host_def_status_block *def_sb = bp->def_status_blk;
    u16 rc = 0;

    barrier(); /* status block is written to by the chip */
    if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
        bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
        rc |= 1;
    }
    if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
        bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
        rc |= 2;
    }
    if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
        bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
        rc |= 4;
    }
    if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
        bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
        rc |= 8;
    }
    if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
        bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
        rc |= 16;
    }
    return rc;
}
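/*
 * Illustrative sketch (not part of the driver): the cached-vs-hardware
 * index comparison used above, reduced to a single counter.  Names and
 * types here are made up for illustration.
 */
static int example_index_changed(unsigned short *cached,
                                 volatile unsigned short *hw_idx)
{
    unsigned short now = *hw_idx;   /* the chip updates this via DMA */

    if (*cached != now) {
        *cached = now;              /* consume the new value */
        return 1;                   /* caller must process/re-ack */
    }
    return 0;
}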
/****************************************************************************
* slow path service functions
****************************************************************************/

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
    int port = BP_PORT(bp);
    int func = BP_FUNC(bp);
    u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
    u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                          MISC_REG_AEU_MASK_ATTN_FUNC_0;
    u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
                                   NIG_REG_MASK_INTERRUPT_PORT0;

    if (~bp->aeu_mask & (asserted & 0xff))
        BNX2X_ERR("IGU ERROR\n");
    if (bp->attn_state & asserted)
        BNX2X_ERR("IGU ERROR\n");

    DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
       bp->aeu_mask, asserted);
    bp->aeu_mask &= ~(asserted & 0xff);
    DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);

    REG_WR(bp, aeu_addr, bp->aeu_mask);

    bp->attn_state |= asserted;

    if (asserted & ATTN_HARD_WIRED_MASK) {
        if (asserted & ATTN_NIG_FOR_FUNC) {

            /* save nig interrupt mask */
            bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
            REG_WR(bp, nig_int_mask_addr, 0);

            bnx2x_link_attn(bp);

            /* handle unicore attn? */
        }
        if (asserted & ATTN_SW_TIMER_4_FUNC)
            DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

        if (asserted & GPIO_2_FUNC)
            DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

        if (asserted & GPIO_3_FUNC)
            DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

        if (asserted & GPIO_4_FUNC)
            DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

        if (port == 0) {
            if (asserted & ATTN_GENERAL_ATTN_1) {
                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_2) {
                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_3) {
                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
            }
        } else {
            if (asserted & ATTN_GENERAL_ATTN_4) {
                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_5) {
                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_6) {
                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
            }
        }

    } /* if hardwired */

    DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
       asserted, BAR_IGU_INTMEM + igu_addr);
    REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

    /* now set back the mask */
    if (asserted & ATTN_NIG_FOR_FUNC)
        REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
    int port = BP_PORT(bp);
    int reg_offset;
    u32 val;

    reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                         MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

        val = REG_RD(bp, reg_offset);
        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(bp, reg_offset, val);

        BNX2X_ERR("SPIO5 hw attention\n");

        switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
        case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
            /* Fan failure attention */

            /* The PHY reset is controlled by GPIO 1 */
            bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                           MISC_REGISTERS_GPIO_OUTPUT_LOW);
            /* Low power mode is controlled by GPIO 2 */
            bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
                           MISC_REGISTERS_GPIO_OUTPUT_LOW);
            /* mark the failure */
            bp->link_params.ext_phy_config &=
                ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
            bp->link_params.ext_phy_config |=
                PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
            SHMEM_WR(bp,
                     dev_info.port_hw_config[port].
                     external_phy_config,
                     bp->link_params.ext_phy_config);
            /* log the failure */
            printk(KERN_ERR PFX "Fan Failure on Network"
                   " Controller %s has caused the driver to"
                   " shutdown the card to prevent permanent"
                   " damage. Please contact Dell Support for"
                   " assistance\n", bp->dev->name);
            break;

        default:
            break;
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_0) {

        val = REG_RD(bp, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
        REG_WR(bp, reg_offset, val);

        BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
                  (attn & HW_INTERRUT_ASSERT_SET_0));
    }
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
    u32 val;

    if (attn & BNX2X_DOORQ_ASSERT) {

        val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
        BNX2X_ERR("DB hw attention 0x%x\n", val);
        /* DORQ discard attention */
        if (val & 0x2)
            BNX2X_ERR("FATAL error from DORQ\n");
    }

    if (attn & HW_INTERRUT_ASSERT_SET_1) {

        int port = BP_PORT(bp);
        int reg_offset;

        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

        val = REG_RD(bp, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
        REG_WR(bp, reg_offset, val);

        BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
                  (attn & HW_INTERRUT_ASSERT_SET_1));
    }
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
    u32 val;

    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

        val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
        BNX2X_ERR("CFC hw attention 0x%x\n", val);
        /* CFC error attention */
        if (val & 0x2)
            BNX2X_ERR("FATAL error from CFC\n");
    }

    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

        val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
        BNX2X_ERR("PXP hw attention 0x%x\n", val);
        /* RQ_USDMDP_FIFO_OVERFLOW */
        if (val & 0x18000)
            BNX2X_ERR("FATAL error from PXP\n");
    }

    if (attn & HW_INTERRUT_ASSERT_SET_2) {

        int port = BP_PORT(bp);
        int reg_offset;

        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

        val = REG_RD(bp, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
        REG_WR(bp, reg_offset, val);

        BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
                  (attn & HW_INTERRUT_ASSERT_SET_2));
    }
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
    u32 val;

    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

        if (attn & BNX2X_PMF_LINK_ASSERT) {
            int func = BP_FUNC(bp);

            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
            bnx2x__link_status_update(bp);
            if (SHMEM_RD(bp, func_mb[func].drv_status) &
                DRV_STATUS_PMF)
                bnx2x_pmf_update(bp);

        } else if (attn & BNX2X_MC_ASSERT_BITS) {

            BNX2X_ERR("MC assert!\n");
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);

        } else if (attn & BNX2X_MCP_ASSERT) {

            BNX2X_ERR("MCP assert!\n");
            REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);

        } else
            BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
    }

    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
        BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
        if (attn & BNX2X_GRC_TIMEOUT) {
            val = CHIP_IS_E1H(bp) ?
                REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
            BNX2X_ERR("GRC time-out 0x%08x\n", val);
        }
        if (attn & BNX2X_GRC_RSV) {
            val = CHIP_IS_E1H(bp) ?
                REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
            BNX2X_ERR("GRC reserved 0x%08x\n", val);
        }
        REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
    struct attn_route attn;
    struct attn_route group_mask;
    int port = BP_PORT(bp);
    int index;
    u32 reg_addr;
    u32 val;

    /* need to take HW lock because MCP or other port might also
       try to handle this event */
    bnx2x_lock_alr(bp);

    attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
    DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
       attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        if (deasserted & (1 << index)) {
            group_mask = bp->attn_group[index];

            DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
               index, group_mask.sig[0], group_mask.sig[1],
               group_mask.sig[2], group_mask.sig[3]);

            bnx2x_attn_int_deasserted3(bp,
                                       attn.sig[3] & group_mask.sig[3]);
            bnx2x_attn_int_deasserted1(bp,
                                       attn.sig[1] & group_mask.sig[1]);
            bnx2x_attn_int_deasserted2(bp,
                                       attn.sig[2] & group_mask.sig[2]);
            bnx2x_attn_int_deasserted0(bp,
                                       attn.sig[0] & group_mask.sig[0]);

            if ((attn.sig[0] & group_mask.sig[0] &
                 HW_PRTY_ASSERT_SET_0) ||
                (attn.sig[1] & group_mask.sig[1] &
                 HW_PRTY_ASSERT_SET_1) ||
                (attn.sig[2] & group_mask.sig[2] &
                 HW_PRTY_ASSERT_SET_2))
                BNX2X_ERR("FATAL HW block parity attention\n");
        }
    }

    bnx2x_unlock_alr(bp);

    reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;

    val = ~deasserted;
/*  DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
    val, BAR_IGU_INTMEM + reg_addr); */
    REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);

    if (bp->aeu_mask & (deasserted & 0xff))
        BNX2X_ERR("IGU BUG!\n");
    if (~bp->attn_state & deasserted)
        BNX2X_ERR("IGU BUG!\n");

    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                      MISC_REG_AEU_MASK_ATTN_FUNC_0;

    DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
    bp->aeu_mask |= (deasserted & 0xff);
    DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
    REG_WR(bp, reg_addr, bp->aeu_mask);

    DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
    bp->attn_state &= ~deasserted;
    DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
    /* read local copy of bits */
    u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
    u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
    u32 attn_state = bp->attn_state;

    /* look for changed bits */
    u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
    u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

    DP(NETIF_MSG_HW,
       "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
       attn_bits, attn_ack, asserted, deasserted);

    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
        BNX2X_ERR("BAD attention state\n");

    /* handle bits that were raised */
    if (asserted)
        bnx2x_attn_int_asserted(bp, asserted);

    if (deasserted)
        bnx2x_attn_int_deasserted(bp, deasserted);
}
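/*
 * Illustrative sketch (not part of the driver): the three-way bit logic
 * above on sample values.  A bit is "newly asserted" when the chip raised
 * it (attn_bits=1) but it is neither acked (attn_ack=0) nor already
 * tracked (attn_state=0); "deasserted" is the mirror case.
 */
static void example_attn_math(void)
{
    unsigned int attn_bits  = 0x5;  /* chip: bits 0 and 2 raised   */
    unsigned int attn_ack   = 0x4;  /* bit 2 already acknowledged  */
    unsigned int attn_state = 0x4;  /* bit 2 already being handled */

    unsigned int asserted   =  attn_bits & ~attn_ack & ~attn_state; /* 0x1 */
    unsigned int deasserted = ~attn_bits &  attn_ack &  attn_state; /* 0x0 */

    (void)asserted; (void)deasserted;
}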
static void bnx2x_sp_task(struct work_struct *work)
{
    struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
    u16 status;

    /* Return here if interrupt is disabled */
    if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
        DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
        return;
    }

    status = bnx2x_update_dsb_idx(bp);
/*  if (status == 0)                                         */
/*      BNX2X_ERR("spurious slowpath interrupt!\n");         */

    DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);

    /* HW attentions */
    if (status & 0x1)
        bnx2x_attn_int(bp);

    /* CStorm events: query_stats, port delete ramrod */
    if (status & 0x2)
        bp->stats_pending = 0;

    bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
                 IGU_INT_NOP, 1);
    bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
                 IGU_INT_NOP, 1);
    bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
                 IGU_INT_NOP, 1);
    bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
                 IGU_INT_NOP, 1);
    bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
                 IGU_INT_ENABLE, 1);
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct bnx2x *bp = netdev_priv(dev);

    /* Return here if interrupt is disabled */
    if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
        DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
        return IRQ_HANDLED;
    }

    bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return IRQ_HANDLED;
#endif

    schedule_work(&bp->sp_task);

    return IRQ_HANDLED;
}

/* end of slow path */
/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
    do { \
        s_lo += a_lo; \
        s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
    } while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
    do { \
        if (m_lo < s_lo) { \
            /* underflow */ \
            d_hi = m_hi - s_hi; \
            if (d_hi > 0) { \
                /* we can 'loan' 1 */ \
                d_hi--; \
                d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
            } else { \
                /* m_hi <= s_hi */ \
                d_hi = 0; \
                d_lo = 0; \
            } \
        } else { \
            /* m_lo >= s_lo */ \
            if (m_hi < s_hi) { \
                d_hi = 0; \
                d_lo = 0; \
            } else { \
                /* m_hi >= s_hi */ \
                d_hi = m_hi - s_hi; \
                d_lo = m_lo - s_lo; \
            } \
        } \
    } while (0)
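/*
 * Illustrative sketch (not part of the driver): the carry behaviour of the
 * split-64-bit helpers above, on concrete values.  Plain unsigned ints
 * stand in for the u32 halves.
 */
static void example_split64(void)
{
    unsigned int s_hi = 0, s_lo = 0xffffffffU;
    unsigned int a_hi = 0, a_lo = 1;

    /* ADD_64 by hand: low half wraps to 0, so the carry bumps the high half */
    s_lo += a_lo;                               /* 0xffffffff + 1 -> 0 */
    s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);     /* carry -> s_hi == 1  */

    (void)s_hi;
}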
#define UPDATE_STAT64(s, t) \
    do { \
        DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
                diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
        pstats->mac_stx[0].t##_hi = new->s##_hi; \
        pstats->mac_stx[0].t##_lo = new->s##_lo; \
        ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
               pstats->mac_stx[1].t##_lo, diff.lo); \
    } while (0)

#define UPDATE_STAT64_NIG(s, t) \
    do { \
        DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
                diff.lo, new->s##_lo, old->s##_lo); \
        ADD_64(estats->t##_hi, diff.hi, \
               estats->t##_lo, diff.lo); \
    } while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
    do { \
        s_lo += a; \
        s_hi += (s_lo < a) ? 1 : 0; \
    } while (0)

#define UPDATE_EXTEND_STAT(s) \
    do { \
        ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
                      pstats->mac_stx[1].s##_lo, \
                      new->s); \
    } while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
    do { \
        diff = le32_to_cpu(tclient->s) - old_tclient->s; \
        old_tclient->s = le32_to_cpu(tclient->s); \
        ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
    } while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
    do { \
        diff = le32_to_cpu(xclient->s) - old_xclient->s; \
        old_xclient->s = le32_to_cpu(xclient->s); \
        ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
    } while (0)
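/*
 * Illustrative sketch (not part of the driver): the "extend" update above.
 * The firmware counter is only 32 bits; keeping the previous snapshot and
 * accumulating deltas extends it to 64 bits in the host copy, and the
 * unsigned subtraction stays correct across a single 32-bit wrap.
 */
static void example_extend_update(unsigned int hw_now, unsigned int *old,
                                  unsigned int *sum_hi, unsigned int *sum_lo)
{
    unsigned int diff = hw_now - *old;  /* wrap-safe for one overflow */

    *old = hw_now;                      /* remember the snapshot      */
    *sum_lo += diff;                    /* ADD_EXTEND_64 by hand      */
    *sum_hi += (*sum_lo < diff) ? 1 : 0;
}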
/****************************************************************************
* General service functions
****************************************************************************/

static inline long bnx2x_hilo(u32 *hiref)
{
    u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
    u32 hi = *hiref;

    return HILO_U64(hi, lo);
#else
    return lo;
#endif
}
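/*
 * Illustrative sketch (not part of the driver): what the hi/lo pairing
 * means.  On 64-bit builds the two u32 halves are combined; HILO_U64 is
 * assumed to be the usual (hi << 32) | lo composition.
 */
static unsigned long long example_hilo(unsigned int hi, unsigned int lo)
{
    return ((unsigned long long)hi << 32) | lo;
}
/* example_hilo(1, 0) == 4294967296ULL (one full 32-bit wrap) */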
/****************************************************************************
* Init service functions
****************************************************************************/

static void bnx2x_storm_stats_init(struct bnx2x *bp)
{
    int func = BP_FUNC(bp);

    REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
    REG_WR(bp, BAR_XSTRORM_INTMEM +
           XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);

    REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
    REG_WR(bp, BAR_TSTRORM_INTMEM +
           TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);

    REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
    REG_WR(bp, BAR_CSTRORM_INTMEM +
           CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);

    REG_WR(bp, BAR_XSTRORM_INTMEM +
           XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
           U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
    REG_WR(bp, BAR_XSTRORM_INTMEM +
           XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
           U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

    REG_WR(bp, BAR_TSTRORM_INTMEM +
           TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
           U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
    REG_WR(bp, BAR_TSTRORM_INTMEM +
           TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
           U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
}
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
    if (!bp->stats_pending) {
        struct eth_query_ramrod_data ramrod_data = {0};
        int rc;

        ramrod_data.drv_counter = bp->stats_counter++;
        ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
        ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

        rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
                           ((u32 *)&ramrod_data)[1],
                           ((u32 *)&ramrod_data)[0], 0);
        if (rc == 0) {
            /* stats ramrod has its own slot on the spq */
            bp->spq_left++;
            bp->stats_pending = 1;
        }
    }
}
static void bnx2x_stats_init(struct bnx2x *bp)
{
    int port = BP_PORT(bp);

    bp->executer_idx = 0;
    bp->stats_counter = 0;

    /* port stats */
    if (!BP_NOMCP(bp))
        bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
    else
        bp->port.port_stx = 0;
    DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

    memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
    bp->port.old_nig_stats.brb_discard =
        REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
    REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
                &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
    REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
                &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

    /* function stats */
    memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
    memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
    memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
    memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

    bp->stats_state = STATS_STATE_DISABLED;
    if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
        bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
    struct dmae_command *dmae = &bp->stats_dmae;
    u32 *stats_comp = bnx2x_sp(bp, stats_comp);

    *stats_comp = DMAE_COMP_VAL;

    /* loader */
    if (bp->executer_idx) {
        int loader_idx = PMF_DMAE_C(bp);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
                                       DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
        dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
                             sizeof(struct dmae_command) *
                             (loader_idx + 1)) >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct dmae_command) >> 2;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        *stats_comp = 0;
        bnx2x_post_dmae(bp, dmae, loader_idx);

    } else if (bp->func_stx) {
        *stats_comp = 0;
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
    }
}
static int bnx2x_stats_comp(struct bnx2x *bp)
{
    u32 *stats_comp = bnx2x_sp(bp, stats_comp);
    int cnt = 10;

    might_sleep();
    while (*stats_comp != DMAE_COMP_VAL) {
        if (!cnt) {
            BNX2X_ERR("timeout waiting for stats finished\n");
            break;
        }
        cnt--;
        msleep(1);
    }
    return 1;
}
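/*
 * Illustrative sketch (not part of the driver): the completion-word polling
 * pattern used above.  The DMA engine writes a known magic value to a host
 * memory word when the transfer finishes; the host waits until it appears
 * or a budget runs out.
 */
static int example_wait_for_completion_word(volatile unsigned int *comp,
                                            unsigned int magic, int tries)
{
    while (*comp != magic) {
        if (tries-- == 0)
            return -1;      /* timed out */
        /* the driver sleeps 1 ms per try here; busy-wait for illustration */
    }
    return 0;               /* engine signalled completion */
}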
/****************************************************************************
* Statistics service functions
****************************************************************************/

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
    struct dmae_command *dmae;
    u32 opcode;
    int loader_idx = PMF_DMAE_C(bp);
    u32 *stats_comp = bnx2x_sp(bp, stats_comp);

    /* sanity */
    if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
        BNX2X_ERR("BUG!\n");
        return;
    }

    bp->executer_idx = 0;

    opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
              DMAE_CMD_C_ENABLE |
              DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
              DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
              DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
              (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
              (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

    dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
    dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
    dmae->src_addr_lo = bp->port.port_stx >> 2;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
    dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
    dmae->len = DMAE_LEN32_RD_MAX;
    dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
    dmae->comp_addr_hi = 0;
    dmae->comp_val = 1;

    dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
    dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
    dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
                               DMAE_LEN32_RD_MAX * 4);
    dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
    dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
    dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
    bnx2x_hw_stats_post(bp);
    bnx2x_stats_comp(bp);
}
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
    struct dmae_command *dmae;
    int port = BP_PORT(bp);
    int vn = BP_E1HVN(bp);
    u32 opcode;
    int loader_idx = PMF_DMAE_C(bp);
    u32 mac_addr;
    u32 *stats_comp = bnx2x_sp(bp, stats_comp);

    /* sanity */
    if (!bp->link_vars.link_up || !bp->port.pmf) {
        BNX2X_ERR("BUG!\n");
        return;
    }

    bp->executer_idx = 0;

    /* MCP */
    opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
              DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
              DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
              DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
              DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
              (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
              (vn << DMAE_CMD_E1HVN_SHIFT));

    if (bp->port.port_stx) {

        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
        dmae->dst_addr_lo = bp->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct host_port_stats) >> 2;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    if (bp->func_stx) {

        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
        dmae->dst_addr_lo = bp->func_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct host_func_stats) >> 2;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* MAC */
    opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
              DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
              DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
              DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
              DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
              (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
              (vn << DMAE_CMD_E1HVN_SHIFT));

    if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

        mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
                           NIG_REG_INGRESS_BMAC0_MEM);

        /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
           BIGMAC_REGISTER_TX_STAT_GTBYT */
        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr +
                             BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
        dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
                     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* BIGMAC_REGISTER_RX_STAT_GR64 ..
           BIGMAC_REGISTER_RX_STAT_GRIPJ */
        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr +
                             BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                offsetof(struct bmac_stats, rx_stat_gr64_lo));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                offsetof(struct bmac_stats, rx_stat_gr64_lo));
        dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
                     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

    } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

        mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

        /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr +
                             EMAC_REG_EMAC_RX_STAT_AC) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
        dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_RX_STAT_AC_28 */
        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr +
                             EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                offsetof(struct emac_stats, rx_stat_falsecarriererrors));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                offsetof(struct emac_stats, rx_stat_falsecarriererrors));
        dmae->len = 1;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;

        /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = opcode;
        dmae->src_addr_lo = (mac_addr +
                             EMAC_REG_EMAC_TX_STAT_AC) >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
                offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
                offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
        dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
        dmae->comp_addr_hi = 0;
        dmae->comp_val = 1;
    }

    /* NIG */
    dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
    dmae->opcode = opcode;
    dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
                                NIG_REG_STAT0_BRB_DISCARD) >> 2;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
    dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
    dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
    dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
    dmae->comp_addr_hi = 0;
    dmae->comp_val = 1;

    dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
    dmae->opcode = opcode;
    dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
                                NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
            offsetof(struct nig_stats, egress_mac_pkt0_lo));
    dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
            offsetof(struct nig_stats, egress_mac_pkt0_lo));
    dmae->len = (2*sizeof(u32)) >> 2;
    dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
    dmae->comp_addr_hi = 0;
    dmae->comp_val = 1;

    dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
    dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                    DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                    DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                    DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                    DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                    (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                    (vn << DMAE_CMD_E1HVN_SHIFT));
    dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
                                NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
    dmae->src_addr_hi = 0;
    dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
            offsetof(struct nig_stats, egress_mac_pkt1_lo));
    dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
            offsetof(struct nig_stats, egress_mac_pkt1_lo));
    dmae->len = (2*sizeof(u32)) >> 2;
    dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
    dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
}
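/*
 * Illustrative sketch (not part of the driver): how the DMAE 'len' fields
 * above are derived.  A register range is copied as 32-bit words, so the
 * byte span (end - start, plus 8 bytes for the final 64-bit counter) is
 * shifted right by 2 to get a dword count.
 */
static unsigned int example_dmae_len_dwords(unsigned int first_reg,
                                            unsigned int last_reg)
{
    return (8 + last_reg - first_reg) >> 2;
}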
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
    struct dmae_command *dmae = &bp->stats_dmae;
    u32 *stats_comp = bnx2x_sp(bp, stats_comp);

    /* sanity */
    if (!bp->func_stx) {
        BNX2X_ERR("BUG!\n");
        return;
    }

    bp->executer_idx = 0;
    memset(dmae, 0, sizeof(struct dmae_command));

    dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                    DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                    DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                    DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                    DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                    (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                    (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
    dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
    dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
    dmae->dst_addr_lo = bp->func_stx >> 2;
    dmae->dst_addr_hi = 0;
    dmae->len = sizeof(struct host_func_stats) >> 2;
    dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
    dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
    dmae->comp_val = DMAE_COMP_VAL;

    *stats_comp = 0;
}
static void bnx2x_stats_start(struct bnx2x *bp)
{
    if (bp->port.pmf)
        bnx2x_port_stats_init(bp);

    else if (bp->func_stx)
        bnx2x_func_stats_init(bp);

    bnx2x_hw_stats_post(bp);
    bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
    bnx2x_stats_comp(bp);
    bnx2x_stats_pmf_update(bp);
    bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
    bnx2x_stats_comp(bp);
    bnx2x_stats_start(bp);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
    struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
    struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
    struct regpair diff;

    UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
    UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
    UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
    UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
    UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
    UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
    UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
    UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
    UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
    UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
    UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
    UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
    UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
    UPDATE_STAT64(tx_stat_gt127,
                  tx_stat_etherstatspkts65octetsto127octets);
    UPDATE_STAT64(tx_stat_gt255,
                  tx_stat_etherstatspkts128octetsto255octets);
    UPDATE_STAT64(tx_stat_gt511,
                  tx_stat_etherstatspkts256octetsto511octets);
    UPDATE_STAT64(tx_stat_gt1023,
                  tx_stat_etherstatspkts512octetsto1023octets);
    UPDATE_STAT64(tx_stat_gt1518,
                  tx_stat_etherstatspkts1024octetsto1522octets);
    UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
    UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
    UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
    UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
    UPDATE_STAT64(tx_stat_gterr,
                  tx_stat_dot3statsinternalmactransmiterrors);
    UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
    struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
    struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

    UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
    UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
    UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
    UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
    UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
    UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
    UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
    UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
    UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
    UPDATE_EXTEND_STAT(tx_stat_outxonsent);
    UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
    UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
    UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
    UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
    UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
    UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
    struct nig_stats *new = bnx2x_sp(bp, nig_stats);
    struct nig_stats *old = &(bp->port.old_nig_stats);
    struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
    struct bnx2x_eth_stats *estats = &bp->eth_stats;
    struct regpair diff;

    if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
        bnx2x_bmac_stats_update(bp);

    else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
        bnx2x_emac_stats_update(bp);

    else { /* unreached */
        BNX2X_ERR("stats updated by dmae but no MAC active\n");
        return -1;
    }

    ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
                  new->brb_discard - old->brb_discard);

    UPDATE_STAT64_NIG(egress_mac_pkt0,
                      etherstatspkts1024octetsto1522octets);
    UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

    memcpy(old, new, sizeof(struct nig_stats));

    memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
           sizeof(struct mac_stx));
    estats->brb_drop_hi = pstats->brb_drop_hi;
    estats->brb_drop_lo = pstats->brb_drop_lo;

    pstats->host_port_stats_start = ++pstats->host_port_stats_end;

    return 0;
}
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
    struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
    int cl_id = BP_CL_ID(bp);
    struct tstorm_per_port_stats *tport =
        &stats->tstorm_common.port_statistics;
    struct tstorm_per_client_stats *tclient =
        &stats->tstorm_common.client_statistics[cl_id];
    struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
    struct xstorm_per_client_stats *xclient =
        &stats->xstorm_common.client_statistics[cl_id];
    struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
    struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
    struct bnx2x_eth_stats *estats = &bp->eth_stats;
    u32 diff;

    /* are storm stats valid? */
    if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
        bp->stats_counter) {
        DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
           " tstorm counter (%d) != stats_counter (%d)\n",
           tclient->stats_counter, bp->stats_counter);
        return -1;
    }
    if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
        bp->stats_counter) {
        DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
           " xstorm counter (%d) != stats_counter (%d)\n",
           xclient->stats_counter, bp->stats_counter);
        return -2;
    }

    fstats->total_bytes_received_hi =
    fstats->valid_bytes_received_hi =
        le32_to_cpu(tclient->total_rcv_bytes.hi);
    fstats->total_bytes_received_lo =
    fstats->valid_bytes_received_lo =
        le32_to_cpu(tclient->total_rcv_bytes.lo);

    estats->error_bytes_received_hi =
        le32_to_cpu(tclient->rcv_error_bytes.hi);
    estats->error_bytes_received_lo =
        le32_to_cpu(tclient->rcv_error_bytes.lo);
    ADD_64(estats->error_bytes_received_hi,
           estats->rx_stat_ifhcinbadoctets_hi,
           estats->error_bytes_received_lo,
           estats->rx_stat_ifhcinbadoctets_lo);

    ADD_64(fstats->total_bytes_received_hi,
           estats->error_bytes_received_hi,
           fstats->total_bytes_received_lo,
           estats->error_bytes_received_lo);

    UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
    UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
                        total_multicast_packets_received);
    UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
                        total_broadcast_packets_received);

    fstats->total_bytes_transmitted_hi =
        le32_to_cpu(xclient->total_sent_bytes.hi);
    fstats->total_bytes_transmitted_lo =
        le32_to_cpu(xclient->total_sent_bytes.lo);

    UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
                        total_unicast_packets_transmitted);
    UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
                        total_multicast_packets_transmitted);
    UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
                        total_broadcast_packets_transmitted);

    memcpy(estats, &(fstats->total_bytes_received_hi),
           sizeof(struct host_func_stats) - 2*sizeof(u32));

    estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
    estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
    estats->brb_truncate_discard =
        le32_to_cpu(tport->brb_truncate_discard);
    estats->mac_discard = le32_to_cpu(tport->mac_discard);

    old_tclient->rcv_unicast_bytes.hi =
        le32_to_cpu(tclient->rcv_unicast_bytes.hi);
    old_tclient->rcv_unicast_bytes.lo =
        le32_to_cpu(tclient->rcv_unicast_bytes.lo);
    old_tclient->rcv_broadcast_bytes.hi =
        le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
    old_tclient->rcv_broadcast_bytes.lo =
        le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
    old_tclient->rcv_multicast_bytes.hi =
        le32_to_cpu(tclient->rcv_multicast_bytes.hi);
    old_tclient->rcv_multicast_bytes.lo =
        le32_to_cpu(tclient->rcv_multicast_bytes.lo);
    old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

    old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
    old_tclient->packets_too_big_discard =
        le32_to_cpu(tclient->packets_too_big_discard);
    estats->no_buff_discard =
    old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
    old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

    old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
    old_xclient->unicast_bytes_sent.hi =
        le32_to_cpu(xclient->unicast_bytes_sent.hi);
    old_xclient->unicast_bytes_sent.lo =
        le32_to_cpu(xclient->unicast_bytes_sent.lo);
    old_xclient->multicast_bytes_sent.hi =
        le32_to_cpu(xclient->multicast_bytes_sent.hi);
    old_xclient->multicast_bytes_sent.lo =
        le32_to_cpu(xclient->multicast_bytes_sent.lo);
    old_xclient->broadcast_bytes_sent.hi =
        le32_to_cpu(xclient->broadcast_bytes_sent.hi);
    old_xclient->broadcast_bytes_sent.lo =
        le32_to_cpu(xclient->broadcast_bytes_sent.lo);

    fstats->host_func_stats_start = ++fstats->host_func_stats_end;

    return 0;
}
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
    struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
    struct bnx2x_eth_stats *estats = &bp->eth_stats;
    struct net_device_stats *nstats = &bp->dev->stats;

    nstats->rx_packets =
        bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
        bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
        bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

    nstats->tx_packets =
        bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
        bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
        bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

    nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

    nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

    nstats->rx_dropped = old_tclient->checksum_discard +
                         estats->mac_discard;
    nstats->tx_dropped = 0;

    nstats->multicast =
        bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

    nstats->collisions =
        estats->tx_stat_dot3statssinglecollisionframes_lo +
        estats->tx_stat_dot3statsmultiplecollisionframes_lo +
        estats->tx_stat_dot3statslatecollisions_lo +
        estats->tx_stat_dot3statsexcessivecollisions_lo;

    estats->jabber_packets_received =
        old_tclient->packets_too_big_discard +
        estats->rx_stat_dot3statsframestoolong_lo;

    nstats->rx_length_errors =
        estats->rx_stat_etherstatsundersizepkts_lo +
        estats->jabber_packets_received;
    nstats->rx_over_errors = estats->brb_drop_lo +
                             estats->brb_truncate_discard;
    nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
    nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
    nstats->rx_fifo_errors = old_tclient->no_buff_discard;
    nstats->rx_missed_errors = estats->xxoverflow_discard;

    nstats->rx_errors = nstats->rx_length_errors +
                        nstats->rx_over_errors +
                        nstats->rx_crc_errors +
                        nstats->rx_frame_errors +
                        nstats->rx_fifo_errors +
                        nstats->rx_missed_errors;

    nstats->tx_aborted_errors =
        estats->tx_stat_dot3statslatecollisions_lo +
        estats->tx_stat_dot3statsexcessivecollisions_lo;
    nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
    nstats->tx_fifo_errors = 0;
    nstats->tx_heartbeat_errors = 0;
    nstats->tx_window_errors = 0;

    nstats->tx_errors = nstats->tx_aborted_errors +
                        nstats->tx_carrier_errors;
}
static void bnx2x_stats_update(struct bnx2x *bp)
{
    u32 *stats_comp = bnx2x_sp(bp, stats_comp);
    int update = 0;

    if (*stats_comp != DMAE_COMP_VAL)
        return;

    if (bp->port.pmf)
        update = (bnx2x_hw_stats_update(bp) == 0);

    update |= (bnx2x_storm_stats_update(bp) == 0);

    if (update)
        bnx2x_net_stats_update(bp);

    else {
        if (bp->stats_pending) {
            bp->stats_pending++;
            if (bp->stats_pending == 3) {
                BNX2X_ERR("stats were not updated for 3 times\n");
                return;
            }
        }
    }

    if (bp->msglevel & NETIF_MSG_TIMER) {
        struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
        struct bnx2x_eth_stats *estats = &bp->eth_stats;
        struct net_device_stats *nstats = &bp->dev->stats;
        int i;

        printk(KERN_DEBUG "%s:\n", bp->dev->name);
        printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
               " tx pkt (%lx)\n",
               bnx2x_tx_avail(bp->fp),
               le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
        printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
               " rx pkt (%lx)\n",
               (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
                     bp->fp->rx_comp_cons),
               le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
        printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
               netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
               estats->driver_xoff, estats->brb_drop_lo);
        printk(KERN_DEBUG "tstats: checksum_discard %u "
               "packets_too_big_discard %u no_buff_discard %u "
               "mac_discard %u mac_filter_discard %u "
               "xxovrflow_discard %u brb_truncate_discard %u "
               "ttl0_discard %u\n",
               old_tclient->checksum_discard,
               old_tclient->packets_too_big_discard,
               old_tclient->no_buff_discard, estats->mac_discard,
               estats->mac_filter_discard, estats->xxoverflow_discard,
               estats->brb_truncate_discard,
               old_tclient->ttl0_discard);

        for_each_queue(bp, i) {
            printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
                   bnx2x_fp(bp, i, tx_pkt),
                   bnx2x_fp(bp, i, rx_pkt),
                   bnx2x_fp(bp, i, rx_calls));
        }
    }

    bnx2x_hw_stats_post(bp);
    bnx2x_storm_stats_post(bp);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
    struct dmae_command *dmae;
    u32 opcode;
    int loader_idx = PMF_DMAE_C(bp);
    u32 *stats_comp = bnx2x_sp(bp, stats_comp);

    bp->executer_idx = 0;

    opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
              DMAE_CMD_C_ENABLE |
              DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
              DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
              DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
              (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
              (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

    if (bp->port.port_stx) {

        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        if (bp->func_stx)
            dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
        else
            dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
        dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
        dmae->dst_addr_lo = bp->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct host_port_stats) >> 2;
        if (bp->func_stx) {
            dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
            dmae->comp_addr_hi = 0;
            dmae->comp_val = 1;
        } else {
            dmae->comp_addr_lo =
                U64_LO(bnx2x_sp_mapping(bp, stats_comp));
            dmae->comp_addr_hi =
                U64_HI(bnx2x_sp_mapping(bp, stats_comp));
            dmae->comp_val = DMAE_COMP_VAL;

            *stats_comp = 0;
        }
    }

    if (bp->func_stx) {

        dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
        dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
        dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
        dmae->dst_addr_lo = bp->func_stx >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = sizeof(struct host_func_stats) >> 2;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        *stats_comp = 0;
    }
}
static void bnx2x_stats_stop(struct bnx2x *bp)
{
    int update = 0;

    bnx2x_stats_comp(bp);

    if (bp->port.pmf)
        update = (bnx2x_hw_stats_update(bp) == 0);

    update |= (bnx2x_storm_stats_update(bp) == 0);

    if (update) {
        bnx2x_net_stats_update(bp);

        if (bp->port.pmf)
            bnx2x_port_stats_stop(bp);

        bnx2x_hw_stats_post(bp);
        bnx2x_stats_comp(bp);
    }
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
    void (*action)(struct bnx2x *bp);
    enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state        event   */
{
/* DISABLED  PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*           LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*           UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*           STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED   PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*           LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*           UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*           STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
    enum bnx2x_stats_state state = bp->stats_state;

    bnx2x_stats_stm[state][event].action(bp);
    bp->stats_state = bnx2x_stats_stm[state][event].next_state;

    if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
        DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
           state, event, bp->stats_state);
}
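/*
 * Illustrative sketch (not part of the driver): the table-driven state
 * machine above, reduced to two states and two events.  Each cell pairs an
 * action with the successor state, so dispatch is a single array lookup.
 * All names here are made up for illustration.
 */
enum ex_state { EX_OFF, EX_ON, EX_STATE_MAX };
enum ex_event { EX_GO, EX_HALT, EX_EVENT_MAX };

static void ex_noop(void) { }

static const struct {
    void (*action)(void);
    enum ex_state next;
} ex_stm[EX_STATE_MAX][EX_EVENT_MAX] = {
    /* OFF */ { { ex_noop, EX_ON }, { ex_noop, EX_OFF } },
    /* ON  */ { { ex_noop, EX_ON }, { ex_noop, EX_OFF } },
};

static enum ex_state ex_handle(enum ex_state s, enum ex_event e)
{
    ex_stm[s][e].action();      /* run the cell's action...      */
    return ex_stm[s][e].next;   /* ...then take its transition   */
}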
static void bnx2x_timer(unsigned long data)
{
    struct bnx2x *bp = (struct bnx2x *) data;

    if (!netif_running(bp->dev))
        return;

    if (atomic_read(&bp->intr_sem) != 0)
        goto timer_restart;

    if (poll) {
        struct bnx2x_fastpath *fp = &bp->fp[0];
        int rc;

        bnx2x_tx_int(fp, 1000);
        rc = bnx2x_rx_int(fp, 1000);
    }

    if (!BP_NOMCP(bp)) {
        int func = BP_FUNC(bp);
        u32 drv_pulse;
        u32 mcp_pulse;

        ++bp->fw_drv_pulse_wr_seq;
        bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
        /* TBD - add SYSTEM_TIME */
        drv_pulse = bp->fw_drv_pulse_wr_seq;
        SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

        mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
                     MCP_PULSE_SEQ_MASK);
        /* The delta between driver pulse and mcp response
         * should be 1 (before mcp response) or 0 (after mcp response)
         */
        if ((drv_pulse != mcp_pulse) &&
            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
            /* someone lost a heartbeat... */
            BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                      drv_pulse, mcp_pulse);
        }
    }

    if ((bp->state == BNX2X_STATE_OPEN) ||
        (bp->state == BNX2X_STATE_DISABLED))
        bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
    mod_timer(&bp->timer, jiffies + bp->current_interval);
}
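/*
 * Illustrative sketch (not part of the driver): the pulse/heartbeat check
 * above.  With a wrapping sequence mask, the only healthy states are
 * "driver one ahead of MCP" or "both equal"; anything else means a missed
 * heartbeat.  EX_SEQ_MASK is a made-up stand-in for the real masks.
 */
#define EX_SEQ_MASK 0x7fff

static int example_heartbeat_ok(unsigned int drv_pulse, unsigned int mcp_pulse)
{
    return (drv_pulse == mcp_pulse) ||
           (drv_pulse == ((mcp_pulse + 1) & EX_SEQ_MASK));
}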
/* end of Statistics */

/****************************************************************************
* nic init service functions
****************************************************************************/
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
    int port = BP_PORT(bp);

    bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
                    USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
                    sizeof(struct ustorm_def_status_block)/4);
    bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
                    CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
                    sizeof(struct cstorm_def_status_block)/4);
}
static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
                          struct host_status_block *sb, dma_addr_t mapping)
{
    int port = BP_PORT(bp);
    int func = BP_FUNC(bp);
    int index;
    u64 section;

    /* USTORM */
    section = ((u64)mapping) + offsetof(struct host_status_block,
                                        u_status_block);
    sb->u_status_block.status_block_id = sb_id;

    REG_WR(bp, BAR_USTRORM_INTMEM +
           USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
    REG_WR(bp, BAR_USTRORM_INTMEM +
           ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
           U64_HI(section));
    REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
            USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

    for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
        REG_WR16(bp, BAR_USTRORM_INTMEM +
                 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

    /* CSTORM */
    section = ((u64)mapping) + offsetof(struct host_status_block,
                                        c_status_block);
    sb->c_status_block.status_block_id = sb_id;

    REG_WR(bp, BAR_CSTRORM_INTMEM +
           CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
    REG_WR(bp, BAR_CSTRORM_INTMEM +
           ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
           U64_HI(section));
    REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
            CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

    for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
        REG_WR16(bp, BAR_CSTRORM_INTMEM +
                 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

    bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
    int func = BP_FUNC(bp);

    bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
                    USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                    sizeof(struct ustorm_def_status_block)/4);
    bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
                    CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                    sizeof(struct cstorm_def_status_block)/4);
    bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
                    XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                    sizeof(struct xstorm_def_status_block)/4);
    bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
                    TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                    sizeof(struct tstorm_def_status_block)/4);
}
4074 static void bnx2x_init_def_sb(struct bnx2x *bp,
4075 struct host_def_status_block *def_sb,
4076 dma_addr_t mapping, int sb_id)
4078 int port = BP_PORT(bp);
4079 int func = BP_FUNC(bp);
4080 int index, val, reg_offset;
4084 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4085 atten_status_block);
4086 def_sb->atten_status_block.status_block_id = sb_id;
4088 bp->def_att_idx = 0;
4091 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4092 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4094 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4095 bp->attn_group[index].sig[0] = REG_RD(bp,
4096 reg_offset + 0x10*index);
4097 bp->attn_group[index].sig[1] = REG_RD(bp,
4098 reg_offset + 0x4 + 0x10*index);
4099 bp->attn_group[index].sig[2] = REG_RD(bp,
4100 reg_offset + 0x8 + 0x10*index);
4101 bp->attn_group[index].sig[3] = REG_RD(bp,
4102 reg_offset + 0xc + 0x10*index);
4105 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4106 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4108 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4109 HC_REG_ATTN_MSG0_ADDR_L);
4111 REG_WR(bp, reg_offset, U64_LO(section));
4112 REG_WR(bp, reg_offset + 4, U64_HI(section));
4114 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4116 val = REG_RD(bp, reg_offset);
4118 REG_WR(bp, reg_offset, val);
4121 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4122 u_def_status_block);
4123 def_sb->u_def_status_block.status_block_id = sb_id;
4127 REG_WR(bp, BAR_USTRORM_INTMEM +
4128 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4129 REG_WR(bp, BAR_USTRORM_INTMEM +
4130 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4132 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4133 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4134 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4137 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4138 REG_WR16(bp, BAR_USTRORM_INTMEM +
4139 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4142 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4143 c_def_status_block);
4144 def_sb->c_def_status_block.status_block_id = sb_id;
4148 REG_WR(bp, BAR_CSTRORM_INTMEM +
4149 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4150 REG_WR(bp, BAR_CSTRORM_INTMEM +
4151 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4153 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4154 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4155 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4158 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4159 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4160 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4163 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4164 t_def_status_block);
4165 def_sb->t_def_status_block.status_block_id = sb_id;
4169 REG_WR(bp, BAR_TSTRORM_INTMEM +
4170 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4171 REG_WR(bp, BAR_TSTRORM_INTMEM +
4172 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4174 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4175 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4176 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4177 BNX2X_BTR);
4179 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4180 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4181 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4184 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4185 x_def_status_block);
4186 def_sb->x_def_status_block.status_block_id = sb_id;
4190 REG_WR(bp, BAR_XSTRORM_INTMEM +
4191 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4192 REG_WR(bp, BAR_XSTRORM_INTMEM +
4193 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4194 U64_HI(section));
4195 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4196 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4197 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4198 BNX2X_BTR);
4200 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4201 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4202 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4204 bp->stats_pending = 0;
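/* With all producers cleared, acknowledge the default status block so the
 * IGU starts delivering slowpath interrupts for it. */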
4206 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4207 }
4209 static void bnx2x_update_coalesce(struct bnx2x *bp)
4210 {
4211 int port = BP_PORT(bp);
4212 int i;
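/* rx_ticks/tx_ticks are given in microseconds; the HC timeout fields
 * apparently count in 12us units, hence the division by 12 below. A zero
 * value sets the per-index disable flag instead of a timeout. */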
4214 for_each_queue(bp, i) {
4215 int sb_id = bp->fp[i].sb_id;
4217 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4218 REG_WR8(bp, BAR_USTRORM_INTMEM +
4219 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4220 HC_INDEX_U_ETH_RX_CQ_CONS),
4221 bp->rx_ticks/12);
4222 REG_WR16(bp, BAR_USTRORM_INTMEM +
4223 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4224 HC_INDEX_U_ETH_RX_CQ_CONS),
4225 bp->rx_ticks ? 0 : 1);
4227 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4229 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4230 HC_INDEX_C_ETH_TX_CQ_CONS),
4231 bp->tx_ticks/12);
4232 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4233 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4234 HC_INDEX_C_ETH_TX_CQ_CONS),
4235 bp->tx_ticks ? 0 : 1);
4236 }
4237 }
4239 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4240 struct bnx2x_fastpath *fp, int last)
4241 {
4242 int i;
4244 for (i = 0; i < last; i++) {
4245 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4246 struct sk_buff *skb = rx_buf->skb;
4248 if (skb == NULL) {
4249 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4250 continue;
4251 }
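/* Only bins still in TPA_START own a DMA mapping; bins already stopped
 * were unmapped when their aggregation completed. */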
4253 if (fp->tpa_state[i] == BNX2X_TPA_START)
4254 pci_unmap_single(bp->pdev,
4255 pci_unmap_addr(rx_buf, mapping),
4256 bp->rx_buf_use_size,
4257 PCI_DMA_FROMDEVICE);
4259 dev_kfree_skb(skb);
4260 rx_buf->skb = NULL;
4261 }
4262 }
4264 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4265 {
4266 int func = BP_FUNC(bp);
4267 u16 ring_prod, cqe_ring_prod = 0;
4268 int i, j;
4270 bp->rx_buf_use_size = bp->dev->mtu;
4271 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4272 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4274 if (bp->flags & TPA_ENABLE_FLAG) {
4276 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4277 bp->rx_buf_use_size, bp->rx_buf_size,
4278 bp->dev->mtu + ETH_OVREHEAD);
4280 for_each_queue(bp, j) {
4281 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4282 struct bnx2x_fastpath *fp = &bp->fp[j];
4284 fp->tpa_pool[i].skb =
4285 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4286 if (!fp->tpa_pool[i].skb) {
4287 BNX2X_ERR("Failed to allocate TPA "
4288 "skb pool for queue[%d] - "
4289 "disabling TPA on this "
4291 bnx2x_free_tpa_pool(bp, fp, i);
4292 fp->disable_tpa = 1;
4293 break;
4294 }
4295 pci_unmap_addr_set((struct sw_rx_bd *)
4296 &bp->fp->tpa_pool[i],
4297 mapping, 0);
4298 fp->tpa_state[i] = BNX2X_TPA_STOP;
4299 }
4300 }
4301 }
4303 for_each_queue(bp, j) {
4304 struct bnx2x_fastpath *fp = &bp->fp[j];
4307 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4308 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4310 /* "next page" elements initialization */
4312 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4313 struct eth_rx_sge *sge;
4315 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4316 sge->addr_hi =
4317 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4318 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4319 sge->addr_lo =
4320 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4321 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322 }
4324 bnx2x_init_sge_ring_bit_mask(fp);
4327 for (i = 1; i <= NUM_RX_RINGS; i++) {
4328 struct eth_rx_bd *rx_bd;
4330 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4331 rx_bd->addr_hi =
4332 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4333 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4334 rx_bd->addr_lo =
4335 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4336 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4337 }
4340 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4341 struct eth_rx_cqe_next_page *nextpg;
4343 nextpg = (struct eth_rx_cqe_next_page *)
4344 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4345 nextpg->addr_hi =
4346 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4347 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4348 nextpg->addr_lo =
4349 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351 }
4353 /* Allocate SGEs and initialize the ring elements */
4354 for (i = 0, ring_prod = 0;
4355 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4357 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4358 BNX2X_ERR("was only able to allocate "
4360 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4361 /* Cleanup already allocated elements */
4362 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4363 bnx2x_free_tpa_pool(bp, fp,
4364 ETH_MAX_AGGREGATION_QUEUES_E1H);
4365 fp->disable_tpa = 1;
4366 ring_prod = 0;
4367 break;
4368 }
4369 ring_prod = NEXT_SGE_IDX(ring_prod);
4370 }
4371 fp->rx_sge_prod = ring_prod;
4373 /* Allocate BDs and initialize BD ring */
4374 fp->rx_comp_cons = fp->rx_alloc_failed = 0;
4375 cqe_ring_prod = ring_prod = 0;
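/* Every data CQE must be backed by an rx BD, so the BD and CQE producers
 * advance in lockstep; the NEXT_*_IDX macros skip the next-page elements,
 * which is why ring_prod can run ahead of the loop counter. */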
4376 for (i = 0; i < bp->rx_ring_size; i++) {
4377 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4378 BNX2X_ERR("was only able to allocate "
4380 fp->rx_alloc_failed++;
4383 ring_prod = NEXT_RX_IDX(ring_prod);
4384 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4385 WARN_ON(ring_prod <= i);
4386 }
4388 fp->rx_bd_prod = ring_prod;
4389 /* must not have more available CQEs than BDs */
4390 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4391 cqe_ring_prod);
4392 fp->rx_pkt = fp->rx_calls = 0;
4394 /* Warning!
4395 * this will generate an interrupt (to the TSTORM)
4396 * must only be done after chip is initialized
4397 */
4398 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4399 fp->rx_sge_prod);
4401 if (j != 0)
4402 continue;
4403 REG_WR(bp, BAR_USTRORM_INTMEM +
4404 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4405 U64_LO(fp->rx_comp_mapping));
4406 REG_WR(bp, BAR_USTRORM_INTMEM +
4407 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4408 U64_HI(fp->rx_comp_mapping));
4409 }
4410 }
4412 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4413 {
4414 int i, j;
4416 for_each_queue(bp, j) {
4417 struct bnx2x_fastpath *fp = &bp->fp[j];
4419 for (i = 1; i <= NUM_TX_RINGS; i++) {
4420 struct eth_tx_bd *tx_bd =
4421 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4423 tx_bd->addr_hi =
4424 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4425 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4426 tx_bd->addr_lo =
4427 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4428 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4429 }
4431 fp->tx_pkt_prod = 0;
4432 fp->tx_pkt_cons = 0;
4433 fp->tx_bd_prod = 0;
4434 fp->tx_bd_cons = 0;
4435 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4436 fp->tx_pkt = 0;
4437 }
4438 }
4440 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4441 {
4442 int func = BP_FUNC(bp);
4444 spin_lock_init(&bp->spq_lock);
4446 bp->spq_left = MAX_SPQ_PENDING;
4447 bp->spq_prod_idx = 0;
4448 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4449 bp->spq_prod_bd = bp->spq;
4450 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
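/* Publish the SPQ page base and initial producer to the XSTORM so the
 * firmware fetches slowpath (ramrod) commands from this host ring. */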
4452 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4453 U64_LO(bp->spq_mapping));
4454 REG_WR(bp,
4455 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4456 U64_HI(bp->spq_mapping));
4458 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4459 bp->spq_prod_idx);
4460 }
4462 static void bnx2x_init_context(struct bnx2x *bp)
4463 {
4464 int i;
4466 for_each_queue(bp, i) {
4467 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4468 struct bnx2x_fastpath *fp = &bp->fp[i];
4469 u8 sb_id = FP_SB_ID(fp);
4471 context->xstorm_st_context.tx_bd_page_base_hi =
4472 U64_HI(fp->tx_desc_mapping);
4473 context->xstorm_st_context.tx_bd_page_base_lo =
4474 U64_LO(fp->tx_desc_mapping);
4475 context->xstorm_st_context.db_data_addr_hi =
4476 U64_HI(fp->tx_prods_mapping);
4477 context->xstorm_st_context.db_data_addr_lo =
4478 U64_LO(fp->tx_prods_mapping);
4479 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4480 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4482 context->ustorm_st_context.common.sb_index_numbers =
4483 BNX2X_RX_SB_INDEX_NUM;
4484 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4485 context->ustorm_st_context.common.status_block_id = sb_id;
4486 context->ustorm_st_context.common.flags =
4487 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4488 context->ustorm_st_context.common.mc_alignment_size = 64;
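/* 64-byte alignment, presumably so received data lands on a cache
 * line boundary within the buffer. */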
4489 context->ustorm_st_context.common.bd_buff_size =
4490 bp->rx_buf_use_size;
4491 context->ustorm_st_context.common.bd_page_base_hi =
4492 U64_HI(fp->rx_desc_mapping);
4493 context->ustorm_st_context.common.bd_page_base_lo =
4494 U64_LO(fp->rx_desc_mapping);
4495 if (!fp->disable_tpa) {
4496 context->ustorm_st_context.common.flags |=
4497 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4498 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4499 context->ustorm_st_context.common.sge_buff_size =
4500 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4501 context->ustorm_st_context.common.sge_page_base_hi =
4502 U64_HI(fp->rx_sge_mapping);
4503 context->ustorm_st_context.common.sge_page_base_lo =
4504 U64_LO(fp->rx_sge_mapping);
4505 }
4507 context->cstorm_st_context.sb_index_number =
4508 HC_INDEX_C_ETH_TX_CQ_CONS;
4509 context->cstorm_st_context.status_block_id = sb_id;
4511 context->xstorm_ag_context.cdu_reserved =
4512 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4513 CDU_REGION_NUMBER_XCM_AG,
4514 ETH_CONNECTION_TYPE);
4515 context->ustorm_ag_context.cdu_usage =
4516 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4517 CDU_REGION_NUMBER_UCM_AG,
4518 ETH_CONNECTION_TYPE);
4519 }
4520 }
4522 static void bnx2x_init_ind_table(struct bnx2x *bp)
4523 {
4524 int port = BP_PORT(bp);
4525 int i;
4527 if (!is_multi(bp))
4528 return;
4530 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4531 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4532 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4533 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4534 i % bp->num_queues);
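/* RSS-hashed flows are spread round-robin over the active queues via
 * this per-port indirection table. */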
4536 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4537 }
4539 static void bnx2x_set_client_config(struct bnx2x *bp)
4540 {
4541 struct tstorm_eth_client_config tstorm_client = {0};
4542 int port = BP_PORT(bp);
4543 int i;
4545 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4546 tstorm_client.statistics_counter_id = 0;
4547 tstorm_client.config_flags =
4548 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4549 #ifdef BCM_VLAN
4550 if (bp->rx_mode && bp->vlgrp) {
4551 tstorm_client.config_flags |=
4552 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4553 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
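/* For TPA the firmware needs an upper bound on how many SGEs one
 * aggregated packet may consume: round the MTU up to whole pages,
 * then up to PAGES_PER_SGE granularity. */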
4557 if (bp->flags & TPA_ENABLE_FLAG) {
4558 tstorm_client.max_sges_for_packet =
4559 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4560 tstorm_client.max_sges_for_packet =
4561 ((tstorm_client.max_sges_for_packet +
4562 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4563 PAGES_PER_SGE_SHIFT;
4565 tstorm_client.config_flags |=
4566 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4567 }
4569 for_each_queue(bp, i) {
4570 REG_WR(bp, BAR_TSTRORM_INTMEM +
4571 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4572 ((u32 *)&tstorm_client)[0]);
4573 REG_WR(bp, BAR_TSTRORM_INTMEM +
4574 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4575 ((u32 *)&tstorm_client)[1]);
4576 }
4578 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4579 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4580 }
4582 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4583 {
4584 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4585 int mode = bp->rx_mode;
4586 int mask = (1 << BP_L_ID(bp));
4587 int func = BP_FUNC(bp);
4588 int i;
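/* Each function owns one bit (its logical id) in the chip-wide
 * accept/drop masks, so rx filtering is set independently per function. */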
4590 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4592 switch (mode) {
4593 case BNX2X_RX_MODE_NONE: /* no Rx */
4594 tstorm_mac_filter.ucast_drop_all = mask;
4595 tstorm_mac_filter.mcast_drop_all = mask;
4596 tstorm_mac_filter.bcast_drop_all = mask;
4597 break;
4598 case BNX2X_RX_MODE_NORMAL:
4599 tstorm_mac_filter.bcast_accept_all = mask;
4600 break;
4601 case BNX2X_RX_MODE_ALLMULTI:
4602 tstorm_mac_filter.mcast_accept_all = mask;
4603 tstorm_mac_filter.bcast_accept_all = mask;
4604 break;
4605 case BNX2X_RX_MODE_PROMISC:
4606 tstorm_mac_filter.ucast_accept_all = mask;
4607 tstorm_mac_filter.mcast_accept_all = mask;
4608 tstorm_mac_filter.bcast_accept_all = mask;
4609 break;
4611 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4615 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4616 REG_WR(bp, BAR_TSTRORM_INTMEM +
4617 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4618 ((u32 *)&tstorm_mac_filter)[i]);
4620 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4621 ((u32 *)&tstorm_mac_filter)[i]); */
4622 }
4624 if (mode != BNX2X_RX_MODE_NONE)
4625 bnx2x_set_client_config(bp);
4626 }
4628 static void bnx2x_init_internal(struct bnx2x *bp)
4629 {
4630 struct tstorm_eth_function_common_config tstorm_config = {0};
4631 struct stats_indication_flags stats_flags = {0};
4632 int port = BP_PORT(bp);
4633 int func = BP_FUNC(bp);
4634 int i;
4636 if (is_multi(bp)) {
4637 tstorm_config.config_flags = MULTI_FLAGS;
4638 tstorm_config.rss_result_mask = MULTI_MASK;
4639 }
4641 tstorm_config.leading_client_id = BP_L_ID(bp);
4643 REG_WR(bp, BAR_TSTRORM_INTMEM +
4644 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4645 (*(u32 *)&tstorm_config));
4647 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4648 (*(u32 *)&tstorm_config)); */
4650 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4651 bnx2x_set_storm_rx_mode(bp);
4653 stats_flags.collect_eth = 1;
4655 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4656 ((u32 *)&stats_flags)[0]);
4657 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4658 ((u32 *)&stats_flags)[1]);
4660 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4661 ((u32 *)&stats_flags)[0]);
4662 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4663 ((u32 *)&stats_flags)[1]);
4665 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4666 ((u32 *)&stats_flags)[0]);
4667 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4668 ((u32 *)&stats_flags)[1]);
4670 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
4671 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4673 if (CHIP_IS_E1H(bp)) {
4674 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4675 IS_E1HMF(bp));
4676 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4677 IS_E1HMF(bp));
4678 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4679 IS_E1HMF(bp));
4680 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4681 IS_E1HMF(bp));
4683 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4684 bp->e1hov);
4685 }
4687 /* Zero this manually as its initialization is
4688 currently missing in the initTool */
4689 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
4690 REG_WR(bp, BAR_USTRORM_INTMEM +
4691 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4693 for_each_queue(bp, i) {
4694 struct bnx2x_fastpath *fp = &bp->fp[i];
4695 u16 max_agg_size;
4697 REG_WR(bp, BAR_USTRORM_INTMEM +
4698 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4699 U64_LO(fp->rx_comp_mapping));
4700 REG_WR(bp, BAR_USTRORM_INTMEM +
4701 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4702 U64_HI(fp->rx_comp_mapping));
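/* Cap the advertised max aggregation size at 0xffff, since the target
 * is a 16-bit internal-memory field. */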
4704 max_agg_size = min((u32)(bp->rx_buf_use_size +
4705 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4706 (u32)0xffff);
4707 REG_WR16(bp, BAR_USTRORM_INTMEM +
4708 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4709 max_agg_size);
4710 }
4711 }
4713 static void bnx2x_nic_init(struct bnx2x *bp)
4714 {
4715 int i;
4717 for_each_queue(bp, i) {
4718 struct bnx2x_fastpath *fp = &bp->fp[i];
4721 fp->state = BNX2X_FP_STATE_CLOSED;
4723 fp->cl_id = BP_L_ID(bp) + i;
4724 fp->sb_id = fp->cl_id;
4726 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4727 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4728 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4729 fp->status_blk_mapping);
4730 }
4732 bnx2x_init_def_sb(bp, bp->def_status_blk,
4733 bp->def_status_blk_mapping, DEF_SB_ID);
4734 bnx2x_update_coalesce(bp);
4735 bnx2x_init_rx_rings(bp);
4736 bnx2x_init_tx_ring(bp);
4737 bnx2x_init_sp_ring(bp);
4738 bnx2x_init_context(bp);
4739 bnx2x_init_internal(bp);
4740 bnx2x_storm_stats_init(bp);
4741 bnx2x_init_ind_table(bp);
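/* Interrupts are only enabled after every ring, context and internal
 * memory area above has been initialized. */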
4742 bnx2x_int_enable(bp);
4743 }
4745 /* end of nic init */
4747 /****************************************************************************
4748 * gzip service functions
4749 ****************************************************************************/
4751 static int bnx2x_gunzip_init(struct bnx2x *bp)
4752 {
4753 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4754 &bp->gunzip_mapping);
4755 if (bp->gunzip_buf == NULL)
4756 goto gunzip_nomem1;
4758 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4759 if (bp->strm == NULL)
4760 goto gunzip_nomem2;
4762 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4763 GFP_KERNEL);
4764 if (bp->strm->workspace == NULL)
4765 goto gunzip_nomem3;
4767 return 0;
4769 gunzip_nomem3:
4770 kfree(bp->strm);
4771 bp->strm = NULL;
4773 gunzip_nomem2:
4774 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4775 bp->gunzip_mapping);
4776 bp->gunzip_buf = NULL;
4779 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4780 " un-compression\n", bp->dev->name);
4784 static void bnx2x_gunzip_end(struct bnx2x *bp)
4785 {
4786 kfree(bp->strm->workspace);
4788 kfree(bp->strm);
4789 bp->strm = NULL;
4791 if (bp->gunzip_buf) {
4792 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4793 bp->gunzip_mapping);
4794 bp->gunzip_buf = NULL;
4795 }
4796 }
4798 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4799 {
4800 int n, rc;
4802 /* check gzip header */
4803 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4804 return -EINVAL;
4806 n = 10;
4808 #define FNAME 0x8
4810 if (zbuf[3] & FNAME)
4811 while ((zbuf[n++] != 0) && (n < len));
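/* A gzip member starts with a 10-byte fixed header (magic 0x1f 0x8b,
 * method, flags, mtime, xfl, os); if FNAME is set a NUL-terminated name
 * follows, skipped above. The raw deflate stream is then handed to zlib
 * with a negative window-bits value (no zlib/gzip wrapper expected). */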
4813 bp->strm->next_in = zbuf + n;
4814 bp->strm->avail_in = len - n;
4815 bp->strm->next_out = bp->gunzip_buf;
4816 bp->strm->avail_out = FW_BUF_SIZE;
4818 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4819 if (rc != Z_OK)
4820 return rc;
4822 rc = zlib_inflate(bp->strm, Z_FINISH);
4823 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4824 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4825 bp->dev->name, bp->strm->msg);
4827 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4828 if (bp->gunzip_outlen & 0x3)
4829 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4830 " gunzip_outlen (%d) not aligned\n",
4831 bp->dev->name, bp->gunzip_outlen);
4832 bp->gunzip_outlen >>= 2;
4834 zlib_inflateEnd(bp->strm);
4836 if (rc == Z_STREAM_END)
4837 return 0;
4838 else
4839 return rc;
4840 }
4842 /* nic load/unload */
4844 /****************************************************************************
4845 * General service functions
4846 ****************************************************************************/
4848 /* send a NIG loopback debug packet */
4849 static void bnx2x_lb_pckt(struct bnx2x *bp)
4850 {
4851 u32 wb_write[3];
4853 /* Ethernet source and destination addresses */
4854 wb_write[0] = 0x55555555;
4855 wb_write[1] = 0x55555555;
4856 wb_write[2] = 0x20; /* SOP */
4857 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4859 /* NON-IP protocol */
4860 wb_write[0] = 0x09000000;
4861 wb_write[1] = 0x55555555;
4862 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4863 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4864 }
4866 /* some of the internal memories
4867 * are not directly readable from the driver
4868 * to test them we send debug packets
4869 */
4870 static int bnx2x_int_mem_test(struct bnx2x *bp)
4871 {
4872 int factor;
4873 int count, i;
4874 u32 val = 0;
4876 if (CHIP_REV_IS_FPGA(bp))
4877 factor = 120;
4878 else if (CHIP_REV_IS_EMUL(bp))
4879 factor = 200;
4880 else
4881 factor = 1;
4883 DP(NETIF_MSG_HW, "start part1\n");
4885 /* Disable inputs of parser neighbor blocks */
4886 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4887 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4888 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4889 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4891 /* Write 0 to parser credits for CFC search request */
4892 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4894 /* send Ethernet packet */
4895 bnx2x_lb_pckt(bp);
4897 /* TODO do i reset NIG statistic? */
4898 /* Wait until NIG register shows 1 packet of size 0x10 */
4899 count = 1000 * factor;
4900 while (count) {
4902 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4903 val = *bnx2x_sp(bp, wb_data[0]);
4904 if (val == 0x10)
4905 break;
4907 msleep(10);
4908 count--;
4909 }
4910 if (val != 0x10) {
4911 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4912 return -1;
4913 }
4915 /* Wait until PRS register shows 1 packet */
4916 count = 1000 * factor;
4917 while (count) {
4918 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4919 if (val == 1)
4920 break;
4922 msleep(10);
4923 count--;
4924 }
4925 if (val != 0x1) {
4926 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4927 return -2;
4928 }
4930 /* Reset and init BRB, PRS */
4931 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4932 msleep(50);
4933 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4934 msleep(50);
4935 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4936 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4938 DP(NETIF_MSG_HW, "part2\n");
4940 /* Disable inputs of parser neighbor blocks */
4941 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4942 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4943 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4944 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4946 /* Write 0 to parser credits for CFC search request */
4947 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4949 /* send 10 Ethernet packets */
4950 for (i = 0; i < 10; i++)
4951 bnx2x_lb_pckt(bp);
4953 /* Wait until NIG register shows 10 + 1
4954 packets of size 11*0x10 = 0xb0 */
4955 count = 1000 * factor;
4956 while (count) {
4958 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4959 val = *bnx2x_sp(bp, wb_data[0]);
4960 if (val == 0xb0)
4961 break;
4963 msleep(10);
4964 count--;
4965 }
4966 if (val != 0xb0) {
4967 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4968 return -3;
4969 }
4971 /* Wait until PRS register shows 2 packets */
4972 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4973 if (val != 2)
4974 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4976 /* Write 1 to parser credits for CFC search request */
4977 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4979 /* Wait until PRS register shows 3 packets */
4980 msleep(10 * factor);
4981 /* Wait until NIG register shows 1 packet of size 0x10 */
4982 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4983 if (val != 3)
4984 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4986 /* clear NIG EOP FIFO */
4987 for (i = 0; i < 11; i++)
4988 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4989 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4990 if (val != 1) {
4991 BNX2X_ERR("clear of NIG failed\n");
4992 return -4;
4993 }
4995 /* Reset and init BRB, PRS, NIG */
4996 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4997 msleep(50);
4998 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4999 msleep(50);
5000 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5001 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5003 /* set NIC mode */
5004 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5007 /* Enable inputs of parser neighbor blocks */
5008 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5009 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5010 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5011 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5013 DP(NETIF_MSG_HW, "done\n");
5018 static void enable_blocks_attention(struct bnx2x *bp)
5019 {
5020 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5021 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5022 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5023 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5024 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5025 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5026 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5027 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5028 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5029 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5030 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5031 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5032 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5033 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5034 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5035 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5036 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5037 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5038 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5039 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5040 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5041 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5042 if (CHIP_REV_IS_FPGA(bp))
5043 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5044 else
5045 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5046 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5047 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5048 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5049 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5050 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5051 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5052 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5053 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5054 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
5055 }
5058 static int bnx2x_init_common(struct bnx2x *bp)
5059 {
5060 u32 val, i;
5062 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5064 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5065 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5067 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5068 if (CHIP_IS_E1H(bp))
5069 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5071 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5073 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5075 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5076 if (CHIP_IS_E1(bp)) {
5077 /* enable HW interrupt from PXP on USDM overflow
5078 bit 16 on INT_MASK_0 */
5079 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5080 }
5082 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5084 #ifdef __BIG_ENDIAN
5086 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5087 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5088 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5089 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5090 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5091 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5093 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5094 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5095 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5096 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5097 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5098 #endif
5102 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5105 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5106 #ifdef BCM_ISCSI
5107 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5108 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5109 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5110 #endif
5112 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5113 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5115 /* let the HW do its magic ... */
5116 msleep(100);
5117 /* finish PXP init */
5118 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5119 if (val != 1) {
5120 BNX2X_ERR("PXP2 CFG failed\n");
5121 return -EBUSY;
5122 }
5123 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5124 if (val != 1) {
5125 BNX2X_ERR("PXP2 RD_INIT failed\n");
5126 return -EBUSY;
5127 }
5129 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5130 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5132 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5134 /* clean the DMAE memory */
5135 bp->dmae_ready = 1;
5136 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5138 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5139 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5140 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5141 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5143 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5144 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5145 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5146 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5148 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5149 /* soft reset pulse */
5150 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5151 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5153 #ifdef BCM_ISCSI
5154 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5155 #endif
5157 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5158 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5159 if (!CHIP_REV_IS_SLOW(bp)) {
5160 /* enable hw interrupt from doorbell Q */
5161 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5164 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5165 if (CHIP_REV_IS_SLOW(bp)) {
5166 /* fix for emulation and FPGA for no pause */
5167 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5168 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5169 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5170 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5173 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5174 if (CHIP_IS_E1H(bp))
5175 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5177 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5178 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5179 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5180 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5182 if (CHIP_IS_E1H(bp)) {
5183 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5184 STORM_INTMEM_SIZE_E1H/2);
5185 bnx2x_init_fill(bp,
5186 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5187 0, STORM_INTMEM_SIZE_E1H/2);
5188 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5189 STORM_INTMEM_SIZE_E1H/2);
5190 bnx2x_init_fill(bp,
5191 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5192 0, STORM_INTMEM_SIZE_E1H/2);
5193 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5194 STORM_INTMEM_SIZE_E1H/2);
5195 bnx2x_init_fill(bp,
5196 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5197 0, STORM_INTMEM_SIZE_E1H/2);
5198 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5199 STORM_INTMEM_SIZE_E1H/2);
5200 bnx2x_init_fill(bp,
5201 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5202 0, STORM_INTMEM_SIZE_E1H/2);
5203 } else {
5204 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5205 STORM_INTMEM_SIZE_E1);
5206 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5207 STORM_INTMEM_SIZE_E1);
5208 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5209 STORM_INTMEM_SIZE_E1);
5210 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5211 STORM_INTMEM_SIZE_E1);
5212 }
5214 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5215 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5216 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5217 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5220 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5221 0x80000000);
5222 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5223 0x80000000);
5225 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5226 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5227 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5229 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5230 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5231 REG_WR(bp, i, 0xc0cac01a);
5232 /* TODO: replace with something meaningful */
5233 }
5234 if (CHIP_IS_E1H(bp))
5235 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5236 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5238 if (sizeof(union cdu_context) != 1024)
5239 /* we currently assume that a context is 1024 bytes */
5240 printk(KERN_ALERT PFX "please adjust the size of"
5241 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5243 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5244 val = (4 << 24) + (0 << 12) + 1024;
5245 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5246 if (CHIP_IS_E1(bp)) {
5247 /* !!! fix pxp client credit until excel update */
5248 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5249 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5250 }
5252 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5253 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5255 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5256 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5258 /* PXPCS COMMON comes here */
5259 /* Reset PCIE errors for debug */
5260 REG_WR(bp, 0x2814, 0xffffffff);
5261 REG_WR(bp, 0x3820, 0xffffffff);
5263 /* EMAC0 COMMON comes here */
5264 /* EMAC1 COMMON comes here */
5265 /* DBU COMMON comes here */
5266 /* DBG COMMON comes here */
5268 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5269 if (CHIP_IS_E1H(bp)) {
5270 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5271 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5274 if (CHIP_REV_IS_SLOW(bp))
5275 msleep(200);
5277 /* finish CFC init */
5278 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5279 if (val != 1) {
5280 BNX2X_ERR("CFC LL_INIT failed\n");
5281 return -EBUSY;
5282 }
5283 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5284 if (val != 1) {
5285 BNX2X_ERR("CFC AC_INIT failed\n");
5286 return -EBUSY;
5287 }
5288 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5289 if (val != 1) {
5290 BNX2X_ERR("CFC CAM_INIT failed\n");
5291 return -EBUSY;
5292 }
5293 REG_WR(bp, CFC_REG_DEBUG0, 0);
5295 /* read NIG statistic
5296 to see if this is our first up since powerup */
5297 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5298 val = *bnx2x_sp(bp, wb_data[0]);
5300 /* do internal memory self test */
5301 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5302 BNX2X_ERR("internal mem self test failed\n");
5306 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5307 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5308 /* Fan failure is indicated by SPIO 5 */
5309 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5310 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5312 /* set to active low mode */
5313 val = REG_RD(bp, MISC_REG_SPIO_INT);
5314 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5315 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5316 REG_WR(bp, MISC_REG_SPIO_INT, val);
5318 /* enable interrupt to signal the IGU */
5319 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5320 val |= (1 << MISC_REGISTERS_SPIO_5);
5321 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5322 break;
5324 default:
5325 break;
5326 }
5328 /* clear PXP2 attentions */
5329 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5331 enable_blocks_attention(bp);
5333 if (bp->flags & TPA_ENABLE_FLAG) {
5334 struct tstorm_eth_tpa_exist tmp = {0};
5336 tmp.tpa_exist = 1;
5338 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5339 ((u32 *)&tmp)[0]);
5340 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5341 ((u32 *)&tmp)[1]);
5342 }
5344 return 0;
5345 }
5347 static int bnx2x_init_port(struct bnx2x *bp)
5348 {
5349 int port = BP_PORT(bp);
5350 u32 val;
5352 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5354 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5356 /* Port PXP comes here */
5357 /* Port PXP2 comes here */
5358 #ifdef BCM_ISCSI
5362 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5363 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5364 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5365 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5370 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5371 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5372 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5373 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5378 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5379 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5380 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5381 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5382 #endif
5383 /* Port CMs come here */
5385 /* Port QM comes here */
5386 #ifdef BCM_ISCSI
5387 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5388 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5390 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5391 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5392 #endif
5393 /* Port DQ comes here */
5394 /* Port BRB1 comes here */
5395 /* Port PRS comes here */
5396 /* Port TSDM comes here */
5397 /* Port CSDM comes here */
5398 /* Port USDM comes here */
5399 /* Port XSDM comes here */
5400 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5401 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5402 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5403 port ? USEM_PORT1_END : USEM_PORT0_END);
5404 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5405 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5406 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5407 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5408 /* Port UPB comes here */
5409 /* Port XPB comes here */
5411 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5412 port ? PBF_PORT1_END : PBF_PORT0_END);
5414 /* configure PBF to work without PAUSE mtu 9000 */
5415 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5417 /* update threshold */
5418 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5419 /* update init credit */
5420 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5422 /* probe changes */
5423 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5424 msleep(5);
5425 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5427 #ifdef BCM_ISCSI
5428 /* tell the searcher where the T2 table is */
5429 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5431 wb_write[0] = U64_LO(bp->t2_mapping);
5432 wb_write[1] = U64_HI(bp->t2_mapping);
5433 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5434 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5435 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5436 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5438 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5439 /* Port SRCH comes here */
5440 #endif
5441 /* Port CDU comes here */
5442 /* Port CFC comes here */
5444 if (CHIP_IS_E1(bp)) {
5445 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5446 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5448 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5449 port ? HC_PORT1_END : HC_PORT0_END);
5451 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5452 MISC_AEU_PORT0_START,
5453 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5454 /* init aeu_mask_attn_func_0/1:
5455 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5456 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5457 * bits 4-7 are used for "per vn group attention" */
5458 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5459 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5461 /* Port PXPCS comes here */
5462 /* Port EMAC0 comes here */
5463 /* Port EMAC1 comes here */
5464 /* Port DBU comes here */
5465 /* Port DBG comes here */
5466 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5467 port ? NIG_PORT1_END : NIG_PORT0_END);
5469 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5471 if (CHIP_IS_E1H(bp)) {
5472 u32 wsum;
5473 struct cmng_struct_per_port m_cmng_port;
5474 int vn;
5476 /* 0x2 disable e1hov, 0x1 enable */
5477 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5478 (IS_E1HMF(bp) ? 0x1 : 0x2));
5480 /* Init RATE SHAPING and FAIRNESS contexts.
5481 Initialize as if there is 10G link. */
5482 wsum = bnx2x_calc_vn_wsum(bp);
5483 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5485 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5486 bnx2x_init_vn_minmax(bp, 2*vn + port,
5487 wsum, 10000, &m_cmng_port);
5488 }
5490 /* Port MCP comes here */
5491 /* Port DMAE comes here */
5493 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5494 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5495 /* add SPIO 5 to group 0 */
5496 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5497 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5498 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5499 break;
5501 default:
5502 break;
5503 }
5505 bnx2x__link_reset(bp);
5507 return 0;
5508 }
5510 #define ILT_PER_FUNC (768/2)
5511 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5512 /* the physical address is shifted right 12 bits and a valid bit
5513 (1) is added at bit 53;
5514 then, since this is a wide register(TM),
5515 we split it into two 32 bit writes
5516 */
5517 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5518 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5519 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5520 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
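/* An ILT range register packs the first and last line numbers into one
 * word 10 bits apart; PXP_ONE_ILT maps a single line by using the same
 * index for both fields. */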
5522 #define CNIC_ILT_LINES 0
5524 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5525 {
5526 u32 reg;
5528 if (CHIP_IS_E1H(bp))
5529 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5530 else
5531 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5533 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5534 }
5536 static int bnx2x_init_func(struct bnx2x *bp)
5537 {
5538 int port = BP_PORT(bp);
5539 int func = BP_FUNC(bp);
5540 int i;
5542 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5544 i = FUNC_ILT_BASE(func);
5546 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5547 if (CHIP_IS_E1H(bp)) {
5548 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5549 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5550 } else /* E1 */
5551 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5552 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5555 if (CHIP_IS_E1H(bp)) {
5556 for (i = 0; i < 9; i++)
5557 bnx2x_init_block(bp,
5558 cm_start[func][i], cm_end[func][i]);
5560 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5561 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5562 }
5564 /* HC init per function */
5565 if (CHIP_IS_E1H(bp)) {
5566 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5568 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5569 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5570 }
5571 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5573 if (CHIP_IS_E1H(bp))
5574 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5576 /* Reset PCIE errors for debug */
5577 REG_WR(bp, 0x2114, 0xffffffff);
5578 REG_WR(bp, 0x2120, 0xffffffff);
5580 return 0;
5581 }
5583 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5584 {
5585 int i, rc = 0;
5587 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5588 BP_FUNC(bp), load_code);
5591 mutex_init(&bp->dmae_mutex);
5592 bnx2x_gunzip_init(bp);
5594 switch (load_code) {
5595 case FW_MSG_CODE_DRV_LOAD_COMMON:
5596 rc = bnx2x_init_common(bp);
5597 if (rc)
5598 goto init_hw_err;
5599 /* no break */
5601 case FW_MSG_CODE_DRV_LOAD_PORT:
5602 bp->dmae_ready = 1;
5603 rc = bnx2x_init_port(bp);
5604 if (rc)
5605 goto init_hw_err;
5606 /* no break */
5608 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5609 bp->dmae_ready = 1;
5610 rc = bnx2x_init_func(bp);
5611 if (rc)
5612 goto init_hw_err;
5613 break;
5615 default:
5616 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5617 break;
5618 }
5620 if (!BP_NOMCP(bp)) {
5621 int func = BP_FUNC(bp);
5623 bp->fw_drv_pulse_wr_seq =
5624 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5625 DRV_PULSE_SEQ_MASK);
5626 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5627 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5628 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5632 /* this needs to be done before gunzip end */
5633 bnx2x_zero_def_sb(bp);
5634 for_each_queue(bp, i)
5635 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5637 init_hw_err:
5638 bnx2x_gunzip_end(bp);
5640 return rc;
5641 }
5643 /* send the MCP a request, block until there is a reply */
5644 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5645 {
5646 int func = BP_FUNC(bp);
5647 u32 seq = ++bp->fw_seq;
5648 u32 rc = 0;
5649 u32 cnt = 1;
5650 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5652 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5653 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5656 /* let the FW do its magic ... */
5657 do {
5658 msleep(delay);
5659 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5661 /* Give the FW up to 2 seconds (200*10ms) */
5662 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5664 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5665 cnt*delay, rc, seq);
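/* The sequence number travels in the low bits of both mailboxes; a
 * reply is ours only when the FW echoes back the seq we wrote. */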
5667 /* is this a reply to our command? */
5668 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5669 rc &= FW_MSG_CODE_MASK;
5671 } else {
5672 /* FW BUG! */
5673 BNX2X_ERR("FW failed to respond!\n");
5674 bnx2x_fw_dump(bp);
5675 rc = 0;
5676 }
5678 return rc;
5679 }
5681 static void bnx2x_free_mem(struct bnx2x *bp)
5682 {
5684 #define BNX2X_PCI_FREE(x, y, size) \
5685 do { \
5686 if (x) { \
5687 pci_free_consistent(bp->pdev, size, x, y); \
5688 x = NULL; \
5689 y = 0; \
5690 } \
5691 } while (0)
5693 #define BNX2X_FREE(x) \
5694 do { \
5695 if (x) { \
5696 vfree(x); \
5697 x = NULL; \
5698 } \
5699 } while (0)
5701 int i;
5704 for_each_queue(bp, i) {
5707 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5708 bnx2x_fp(bp, i, status_blk_mapping),
5709 sizeof(struct host_status_block) +
5710 sizeof(struct eth_tx_db_data));
5712 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5713 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5714 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5715 bnx2x_fp(bp, i, tx_desc_mapping),
5716 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5718 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5719 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5720 bnx2x_fp(bp, i, rx_desc_mapping),
5721 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5723 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5724 bnx2x_fp(bp, i, rx_comp_mapping),
5725 sizeof(struct eth_fast_path_rx_cqe) *
5726 NUM_RCQ_BD);
5729 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5730 bnx2x_fp(bp, i, rx_sge_mapping),
5731 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5732 }
5733 /* end of fastpath */
5735 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5736 sizeof(struct host_def_status_block));
5738 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5739 sizeof(struct bnx2x_slowpath));
5741 #ifdef BCM_ISCSI
5742 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5743 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5744 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5745 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5746 #endif
5747 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5749 #undef BNX2X_PCI_FREE
5750 #undef BNX2X_FREE
5751 }
5753 static int bnx2x_alloc_mem(struct bnx2x *bp)
5754 {
5756 #define BNX2X_PCI_ALLOC(x, y, size) \
5757 do { \
5758 x = pci_alloc_consistent(bp->pdev, size, y); \
5759 if (x == NULL) \
5760 goto alloc_mem_err; \
5761 memset(x, 0, size); \
5762 } while (0)
5764 #define BNX2X_ALLOC(x, size) \
5765 do { \
5766 x = vmalloc(size); \
5767 if (x == NULL) \
5768 goto alloc_mem_err; \
5769 memset(x, 0, size); \
5770 } while (0)
5772 int i;
5775 for_each_queue(bp, i) {
5776 bnx2x_fp(bp, i, bp) = bp;
5779 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5780 &bnx2x_fp(bp, i, status_blk_mapping),
5781 sizeof(struct host_status_block) +
5782 sizeof(struct eth_tx_db_data));
5784 bnx2x_fp(bp, i, hw_tx_prods) =
5785 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5787 bnx2x_fp(bp, i, tx_prods_mapping) =
5788 bnx2x_fp(bp, i, status_blk_mapping) +
5789 sizeof(struct host_status_block);
5791 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5792 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5793 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5794 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5795 &bnx2x_fp(bp, i, tx_desc_mapping),
5796 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5798 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5799 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5800 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5801 &bnx2x_fp(bp, i, rx_desc_mapping),
5802 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5804 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5805 &bnx2x_fp(bp, i, rx_comp_mapping),
5806 sizeof(struct eth_fast_path_rx_cqe) *
5807 NUM_RCQ_BD);
5810 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5811 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5812 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5813 &bnx2x_fp(bp, i, rx_sge_mapping),
5814 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5815 }
5816 /* end of fastpath */
5818 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5819 sizeof(struct host_def_status_block));
5821 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5822 sizeof(struct bnx2x_slowpath));
5824 #ifdef BCM_ISCSI
5825 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5828 for (i = 0; i < 64*1024; i += 64) {
5829 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5830 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5833 /* allocate searcher T2 table
5834 we allocate 1/4 of alloc num for T2
5835 (which is not entered into the ILT) */
5836 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5839 for (i = 0; i < 16*1024; i += 64)
5840 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5842 /* now fixup the last line in the block to point to the next block */
5843 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5845 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5846 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5848 /* QM queues (128*MAX_CONN) */
5849 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5850 #endif
5852 /* Slow path ring */
5853 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5855 return 0;
5857 alloc_mem_err:
5858 bnx2x_free_mem(bp);
5859 return -ENOMEM;
5861 #undef BNX2X_PCI_ALLOC
5862 #undef BNX2X_ALLOC
5863 }
5865 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5866 {
5867 int i;
5869 for_each_queue(bp, i) {
5870 struct bnx2x_fastpath *fp = &bp->fp[i];
5872 u16 bd_cons = fp->tx_bd_cons;
5873 u16 sw_prod = fp->tx_pkt_prod;
5874 u16 sw_cons = fp->tx_pkt_cons;
5876 while (sw_cons != sw_prod) {
5877 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5878 sw_cons++;
5879 }
5880 }
5881 }
5883 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5884 {
5885 int i, j;
5887 for_each_queue(bp, j) {
5888 struct bnx2x_fastpath *fp = &bp->fp[j];
5890 for (i = 0; i < NUM_RX_BD; i++) {
5891 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5892 struct sk_buff *skb = rx_buf->skb;
5894 if (skb == NULL)
5895 continue;
5897 pci_unmap_single(bp->pdev,
5898 pci_unmap_addr(rx_buf, mapping),
5899 bp->rx_buf_use_size,
5900 PCI_DMA_FROMDEVICE);
5902 rx_buf->skb = NULL;
5903 dev_kfree_skb(skb);
5904 }
5905 if (!fp->disable_tpa)
5906 bnx2x_free_tpa_pool(bp, fp,
5907 ETH_MAX_AGGREGATION_QUEUES_E1H);
5908 }
5909 }
5911 static void bnx2x_free_skbs(struct bnx2x *bp)
5913 bnx2x_free_tx_skbs(bp);
5914 bnx2x_free_rx_skbs(bp);
5917 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5918 {
5919 int i, offset = 1;
5921 free_irq(bp->msix_table[0].vector, bp->dev);
5922 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5923 bp->msix_table[0].vector);
5925 for_each_queue(bp, i) {
5926 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5927 "state %x\n", i, bp->msix_table[i + offset].vector,
5928 bnx2x_fp(bp, i, state));
5930 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5931 BNX2X_ERR("IRQ of fp #%d being freed while "
5932 "state != closed\n", i);
5934 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5935 }
5936 }
5938 static void bnx2x_free_irq(struct bnx2x *bp)
5939 {
5940 if (bp->flags & USING_MSIX_FLAG) {
5941 bnx2x_free_msix_irqs(bp);
5942 pci_disable_msix(bp->pdev);
5943 bp->flags &= ~USING_MSIX_FLAG;
5945 } else
5946 free_irq(bp->pdev->irq, bp->dev);
5947 }
5949 static int bnx2x_enable_msix(struct bnx2x *bp)
5950 {
5951 int i, rc, offset;
5953 bp->msix_table[0].entry = 0;
5954 offset = 1;
5955 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5957 for_each_queue(bp, i) {
5958 int igu_vec = offset + i + BP_L_ID(bp);
5960 bp->msix_table[i + offset].entry = igu_vec;
5961 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5962 "(fastpath #%u)\n", i + offset, igu_vec, i);
5965 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5966 bp->num_queues + offset);
5968 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
5971 bp->flags |= USING_MSIX_FLAG;
5976 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5977 {
5978 int i, rc, offset = 1;
5980 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
5981 bp->dev->name, bp->dev);
5983 BNX2X_ERR("request sp irq failed\n");
5987 for_each_queue(bp, i) {
5988 rc = request_irq(bp->msix_table[i + offset].vector,
5989 bnx2x_msix_fp_int, 0,
5990 bp->dev->name, &bp->fp[i]);
5992 BNX2X_ERR("request fp #%d irq failed rc %d\n",
5994 bnx2x_free_msix_irqs(bp);
5998 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6004 static int bnx2x_req_irq(struct bnx2x *bp)
6005 {
6006 int rc;
6008 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6009 bp->dev->name, bp->dev);
6010 if (!rc)
6011 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6013 return rc;
6014 }
6017 * Init service functions
6020 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6021 {
6022 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6023 int port = BP_PORT(bp);
6025 /* CAM allocation
6026 * unicasts 0-31:port0 32-63:port1
6027 * multicast 64-127:port0 128-191:port1
6028 */
6029 config->hdr.length_6b = 2;
6030 config->hdr.offset = port ? 31 : 0;
6031 config->hdr.client_id = BP_CL_ID(bp);
6032 config->hdr.reserved1 = 0;
6035 config->config_table[0].cam_entry.msb_mac_addr =
6036 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6037 config->config_table[0].cam_entry.middle_mac_addr =
6038 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6039 config->config_table[0].cam_entry.lsb_mac_addr =
6040 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6041 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6042 config->config_table[0].target_table_entry.flags = 0;
6043 config->config_table[0].target_table_entry.client_id = 0;
6044 config->config_table[0].target_table_entry.vlan_id = 0;
6046 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6047 config->config_table[0].cam_entry.msb_mac_addr,
6048 config->config_table[0].cam_entry.middle_mac_addr,
6049 config->config_table[0].cam_entry.lsb_mac_addr);
6052 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6053 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6054 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6055 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6056 config->config_table[1].target_table_entry.flags =
6057 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6058 config->config_table[1].target_table_entry.client_id = 0;
6059 config->config_table[1].target_table_entry.vlan_id = 0;
6061 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6062 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6063 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6064 }
6066 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6067 {
6068 struct mac_configuration_cmd_e1h *config =
6069 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6071 if (bp->state != BNX2X_STATE_OPEN) {
6072 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6076 /* CAM allocation for E1H
6077 * unicasts: by func number
6078 * multicast: 20+FUNC*20, 20 each
6079 */
6080 config->hdr.length_6b = 1;
6081 config->hdr.offset = BP_FUNC(bp);
6082 config->hdr.client_id = BP_CL_ID(bp);
6083 config->hdr.reserved1 = 0;
6086 config->config_table[0].msb_mac_addr =
6087 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6088 config->config_table[0].middle_mac_addr =
6089 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6090 config->config_table[0].lsb_mac_addr =
6091 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6092 config->config_table[0].client_id = BP_L_ID(bp);
6093 config->config_table[0].vlan_id = 0;
6094 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6095 config->config_table[0].flags = BP_PORT(bp);
6097 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6098 config->config_table[0].msb_mac_addr,
6099 config->config_table[0].middle_mac_addr,
6100 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6102 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6103 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6104 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6105 }
6107 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6108 int *state_p, int poll)
6109 {
6110 /* can take a while if any port is running */
6111 int cnt = 500;
6113 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6114 poll ? "polling" : "waiting", state, idx);
6116 might_sleep();
6117 while (cnt--) {
6118 if (poll) {
6119 bnx2x_rx_int(bp->fp, 10);
6120 /* if index is different from 0
6121 * the reply for some commands will
6122 * be on the non-default queue
6123 */
6124 if (idx)
6125 bnx2x_rx_int(&bp->fp[idx], 10);
6126 }
6127 mb(); /* state is changed by bnx2x_sp_event() */
6129 if (*state_p == state)
6130 return 0;
6132 msleep(1);
6133 }
6136 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6137 poll ? "polling" : "waiting", state, idx);
6138 #ifdef BNX2X_STOP_ON_ERROR
6139 bnx2x_panic();
6140 #endif
6142 return -EBUSY;
6143 }
6145 static int bnx2x_setup_leading(struct bnx2x *bp)
6146 {
6147 int rc;
6149 /* reset IGU state */
6150 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6153 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6155 /* Wait for completion */
6156 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6158 return rc;
6159 }
6161 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6162 {
6163 /* reset IGU state */
6164 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6167 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6168 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6170 /* Wait for completion */
6171 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6172 &(bp->fp[index].state), 0);
6173 }
6175 static int bnx2x_poll(struct napi_struct *napi, int budget);
6176 static void bnx2x_set_rx_mode(struct net_device *dev);
6178 /* must be called with rtnl_lock */
6179 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6180 {
6181 u32 load_code;
6182 int i, rc;
6184 #ifdef BNX2X_STOP_ON_ERROR
6185 if (unlikely(bp->panic))
6186 return -EPERM;
6187 #endif
6189 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6191 /* Send LOAD_REQUEST command to MCP
6192 Returns the type of LOAD command:
6193 if it is the first port to be initialized
6194 common blocks should be initialized, otherwise - not
6195 */
6196 if (!BP_NOMCP(bp)) {
6197 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6199 BNX2X_ERR("MCP response failure, unloading\n");
6202 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6203 return -EBUSY; /* other port in diagnostic mode */
6205 } else {
6206 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6207 load_count[0], load_count[1], load_count[2]);
6208 load_count[0]++;
6209 load_count[1 + BP_PORT(bp)]++;
6210 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6211 load_count[0], load_count[1], load_count[2]);
6212 if (load_count[0] == 1)
6213 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6214 else if (load_count[1 + BP_PORT(bp)] == 1)
6215 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6217 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6218 }
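/* Without an MCP the driver arbitrates loading by itself: load_count[0]
 * counts functions on the chip, load_count[1/2] per port, and whoever
 * loads first performs the common (or port) initialization. */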
6220 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6221 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6222 bp->port.pmf = 1;
6223 else
6224 bp->port.pmf = 0;
6225 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6227 /* if we can't use MSI-X we only need one fp,
6228 * so try to enable MSI-X with the requested number of fp's
6229 * and fallback to inta with one fp
6230 */
6231 if (use_inta) {
6232 bp->num_queues = 1;
6234 } else {
6235 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6236 /* user requested number */
6237 bp->num_queues = use_multi;
6239 else if (use_multi)
6240 bp->num_queues = min_t(u32, num_online_cpus(),
6241 BP_MAX_QUEUES(bp));
6242 else
6243 bp->num_queues = 1;
6245 if (bnx2x_enable_msix(bp)) {
6246 /* failed to enable MSI-X */
6247 bp->num_queues = 1;
6248 if (use_multi)
6249 BNX2X_ERR("Multi requested but failed"
6250 " to enable MSI-X\n");
6251 }
6252 }
6253 DP(NETIF_MSG_IFUP,
6254 "set number of queues to %d\n", bp->num_queues);
6256 if (bnx2x_alloc_mem(bp))
6257 return -ENOMEM;
6259 for_each_queue(bp, i)
6260 bnx2x_fp(bp, i, disable_tpa) =
6261 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6263 /* Disable interrupt handling until HW is initialized */
6264 atomic_set(&bp->intr_sem, 1);
6266 if (bp->flags & USING_MSIX_FLAG) {
6267 rc = bnx2x_req_msix_irqs(bp);
6268 if (rc) {
6269 pci_disable_msix(bp->pdev);
6270 goto load_error;
6271 }
6272 } else {
6274 rc = bnx2x_req_irq(bp);
6275 if (rc) {
6276 BNX2X_ERR("IRQ request failed, aborting\n");
6277 goto load_error;
6278 }
6279 }
6281 for_each_queue(bp, i)
6282 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6283 bnx2x_poll, 128);
6286 rc = bnx2x_init_hw(bp, load_code);
6287 if (rc) {
6288 BNX2X_ERR("HW init failed, aborting\n");
6289 goto load_int_disable;
6290 }
6292 /* Enable interrupt handling */
6293 atomic_set(&bp->intr_sem, 0);
6295 /* Setup NIC internals and enable interrupts */
6296 bnx2x_nic_init(bp);
6298 /* Send LOAD_DONE command to MCP */
6299 if (!BP_NOMCP(bp)) {
6300 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6302 BNX2X_ERR("MCP response failure, unloading\n");
6304 goto load_int_disable;
6308 bnx2x_stats_init(bp);
6310 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6312 /* Enable Rx interrupt handling before sending the ramrod
6313 as it's completed on Rx FP queue */
6314 for_each_queue(bp, i)
6315 napi_enable(&bnx2x_fp(bp, i, napi));
6317 rc = bnx2x_setup_leading(bp);
6318 if (rc) {
6319 #ifdef BNX2X_STOP_ON_ERROR
6320 bp->panic = 1;
6321 #endif
6322 goto load_stop_netif;
6323 }
6325 if (CHIP_IS_E1H(bp))
6326 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6327 BNX2X_ERR("!!! mf_cfg function disabled\n");
6328 bp->state = BNX2X_STATE_DISABLED;
6329 }
6331 if (bp->state == BNX2X_STATE_OPEN)
6332 for_each_nondefault_queue(bp, i) {
6333 rc = bnx2x_setup_multi(bp, i);
6334 if (rc)
6335 goto load_stop_netif;
6336 }
6338 if (CHIP_IS_E1(bp))
6339 bnx2x_set_mac_addr_e1(bp);
6340 else
6341 bnx2x_set_mac_addr_e1h(bp);
6343 if (bp->port.pmf)
6344 bnx2x_initial_phy_init(bp);
6346 /* Start fast path */
6347 switch (load_mode) {
6348 case LOAD_NORMAL:
6349 /* Tx queue should be only reenabled */
6350 netif_wake_queue(bp->dev);
6351 bnx2x_set_rx_mode(bp->dev);
6352 break;
6354 case LOAD_OPEN:
6355 /* IRQ is only requested from bnx2x_open */
6356 netif_start_queue(bp->dev);
6357 bnx2x_set_rx_mode(bp->dev);
6358 if (bp->flags & USING_MSIX_FLAG)
6359 printk(KERN_INFO PFX "%s: using MSI-X\n",
6360 bp->dev->name);
6361 break;
6363 case LOAD_DIAG:
6364 bnx2x_set_rx_mode(bp->dev);
6365 bp->state = BNX2X_STATE_DIAG;
6366 break;
6368 default:
6369 break;
6370 }
6372 if (!bp->port.pmf)
6373 bnx2x__link_status_update(bp);
6375 /* start the timer */
6376 mod_timer(&bp->timer, jiffies + bp->current_interval);
6378 return 0;
6380 load_stop_netif:
6382 for_each_queue(bp, i)
6383 napi_disable(&bnx2x_fp(bp, i, napi));
6385 load_int_disable:
6386 bnx2x_int_disable_sync(bp);
6388 /* Release IRQs */
6389 bnx2x_free_irq(bp);
6391 /* Free SKBs, SGEs, TPA pool and driver internals */
6392 bnx2x_free_skbs(bp);
6393 for_each_queue(bp, i)
6394 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6395 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6399 /* TBD we really need to reset the chip
6400 if we want to recover from this */
6404 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6408 /* halt the connection */
6409 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6410 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6412 /* Wait for completion */
6413 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6414 &(bp->fp[index].state), 1);
6415 if (rc) /* timeout */
6418 /* delete cfc entry */
6419 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6421 /* Wait for completion */
6422 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6423 &(bp->fp[index].state), 1);
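/*
 * Illustrative sketch (not the driver's actual implementation): the
 * bnx2x_wait_ramrod() contract assumed by the callers above is a simple
 * poll on a state variable that the slowpath completion handler updates
 * from interrupt context. The names and the 5s budget are assumptions.
 */
static int example_wait_ramrod(struct bnx2x *bp, int state, int idx,
			       int *state_p)
{
	int cnt = 500;			/* assumed budget: 500 * 10ms = 5s */

	while (cnt--) {
		if (*state_p == state)	/* written by the sp completion */
			return 0;
		msleep(10);
	}
	BNX2X_ERR("timeout waiting for state %d on queue %d\n", state, idx);
	return -EBUSY;
}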
6427 static void bnx2x_stop_leading(struct bnx2x *bp)
6429 u16 dsb_sp_prod_idx;
6430 /* if the other port is handling traffic,
6431 this can take a lot of time */
6437 /* Send HALT ramrod */
6438 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6439 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6441 /* Wait for completion */
6442 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6443 &(bp->fp[0].state), 1);
6444 if (rc) /* timeout */
6447 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6449 /* Send PORT_DELETE ramrod */
6450 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6452 /* Wait for completion to arrive on the default status block.
6453 We are going to reset the chip anyway,
6454 so there is not much to do if this times out
6456 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6459 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6460 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6461 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6462 #ifdef BNX2X_STOP_ON_ERROR
6469 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6470 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6473 static void bnx2x_reset_func(struct bnx2x *bp)
6475 int port = BP_PORT(bp);
6476 int func = BP_FUNC(bp);
6480 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6481 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6483 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6486 base = FUNC_ILT_BASE(func);
6487 for (i = base; i < base + ILT_PER_FUNC; i++)
6488 bnx2x_ilt_wr(bp, i, 0);
6491 static void bnx2x_reset_port(struct bnx2x *bp)
6493 int port = BP_PORT(bp);
6496 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6498 /* Do not rcv packets to BRB */
6499 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6500 /* Do not direct rcv packets that are not for MCP to the BRB */
6501 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6502 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6505 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6508 /* Check for BRB port occupancy */
6509 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6511 DP(NETIF_MSG_IFDOWN,
6512 "BRB1 is not empty %d blooks are occupied\n", val);
6514 /* TODO: Close Doorbell port? */
6517 static void bnx2x_reset_common(struct bnx2x *bp)
6520 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6522 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6525 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6527 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6528 BP_FUNC(bp), reset_code);
6530 switch (reset_code) {
6531 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6532 bnx2x_reset_port(bp);
6533 bnx2x_reset_func(bp);
6534 bnx2x_reset_common(bp);
6537 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6538 bnx2x_reset_port(bp);
6539 bnx2x_reset_func(bp);
6542 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6543 bnx2x_reset_func(bp);
6547 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6552 /* must be called with rtnl_lock */
6553 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6558 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6560 bp->rx_mode = BNX2X_RX_MODE_NONE;
6561 bnx2x_set_storm_rx_mode(bp);
6563 if (netif_running(bp->dev)) {
6564 netif_tx_disable(bp->dev);
6565 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6568 del_timer_sync(&bp->timer);
6569 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6570 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6571 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6573 /* Wait until all fast path tasks complete */
6574 for_each_queue(bp, i) {
6575 struct bnx2x_fastpath *fp = &bp->fp[i];
6577 #ifdef BNX2X_STOP_ON_ERROR
6578 #ifdef __powerpc64__
6579 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6581 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6583 fp->tpa_queue_used);
6587 while (bnx2x_has_work(fp)) {
6590 BNX2X_ERR("timeout waiting for queue[%d]\n",
6592 #ifdef BNX2X_STOP_ON_ERROR
6604 /* Wait until all slow path tasks complete */
6606 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6609 for_each_queue(bp, i)
6610 napi_disable(&bnx2x_fp(bp, i, napi));
6611 /* Disable interrupts after Tx and Rx are disabled on stack level */
6612 bnx2x_int_disable_sync(bp);
6617 if (bp->flags & NO_WOL_FLAG)
6618 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6621 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6622 u8 *mac_addr = bp->dev->dev_addr;
6625 /* The MAC address is written to entries 1-4 to
6626 preserve entry 0, which is used by the PMF */
6627 val = (mac_addr[0] << 8) | mac_addr[1];
6628 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
6630 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6631 (mac_addr[4] << 8) | mac_addr[5];
6632 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
6635 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6638 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6640 /* Close multi and leading connections.
6641 Completions for ramrods are collected in a synchronous way */
6642 for_each_nondefault_queue(bp, i)
6643 if (bnx2x_stop_multi(bp, i))
6646 if (CHIP_IS_E1H(bp))
6647 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
6649 bnx2x_stop_leading(bp);
6650 #ifdef BNX2X_STOP_ON_ERROR
6651 /* If ramrod completion timed out - break here! */
6653 BNX2X_ERR("Stop leading failed!\n");
6658 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6659 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6660 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6661 "state 0x%x fp[0].state 0x%x\n",
6662 bp->state, bp->fp[0].state);
6667 reset_code = bnx2x_fw_command(bp, reset_code);
6669 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6670 load_count[0], load_count[1], load_count[2]);
6672 load_count[1 + BP_PORT(bp)]--;
6673 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6674 load_count[0], load_count[1], load_count[2]);
6675 if (load_count[0] == 0)
6676 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6677 else if (load_count[1 + BP_PORT(bp)] == 0)
6678 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6680 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6683 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6684 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6685 bnx2x__link_reset(bp);
6687 /* Reset the chip */
6688 bnx2x_reset_chip(bp, reset_code);
6690 /* Report UNLOAD_DONE to MCP */
6692 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6694 /* Free SKBs, SGEs, TPA pool and driver internals */
6695 bnx2x_free_skbs(bp);
6696 for_each_queue(bp, i)
6697 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6698 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6701 bp->state = BNX2X_STATE_CLOSED;
6703 netif_carrier_off(bp->dev);
6708 static void bnx2x_reset_task(struct work_struct *work)
6710 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6712 #ifdef BNX2X_STOP_ON_ERROR
6713 BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6714 " so reset not done to allow debug dump;\n"
6715 KERN_ERR " you will need to reboot when done\n");
6721 if (!netif_running(bp->dev))
6722 goto reset_task_exit;
6724 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6725 bnx2x_nic_load(bp, LOAD_NORMAL);
6731 /* end of nic load/unload */
6736 * Init service functions
6739 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6743 /* Check if there is any driver already loaded */
6744 val = REG_RD(bp, MISC_REG_UNPREPARED);
6746 /* Check if it is the UNDI driver
6747 * UNDI driver initializes CID offset for normal bell to 0x7
6749 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6751 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6752 /* save our func and fw_seq */
6753 int func = BP_FUNC(bp);
6754 u16 fw_seq = bp->fw_seq;
6756 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6758 /* try to unload UNDI on port 0 */
6760 bp->fw_seq = (SHMEM_RD(bp,
6761 func_mb[bp->func].drv_mb_header) &
6762 DRV_MSG_SEQ_NUMBER_MASK);
6764 reset_code = bnx2x_fw_command(bp, reset_code);
6765 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6767 /* if UNDI is loaded on the other port */
6768 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6771 bp->fw_seq = (SHMEM_RD(bp,
6772 func_mb[bp->func].drv_mb_header) &
6773 DRV_MSG_SEQ_NUMBER_MASK);
6775 bnx2x_fw_command(bp,
6776 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
6777 bnx2x_fw_command(bp,
6778 DRV_MSG_CODE_UNLOAD_DONE);
6780 /* restore our func and fw_seq */
6782 bp->fw_seq = fw_seq;
6787 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6790 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6796 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6798 u32 val, val2, val3, val4, id;
6800 /* Get the chip revision id and number. */
6801 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6802 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6803 id = ((val & 0xffff) << 16);
6804 val = REG_RD(bp, MISC_REG_CHIP_REV);
6805 id |= ((val & 0xf) << 12);
6806 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6807 id |= ((val & 0xff) << 4);
6808 REG_RD(bp, MISC_REG_BOND_ID);
6810 bp->common.chip_id = id;
6811 bp->link_params.chip_id = bp->common.chip_id;
6812 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6814 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6815 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6816 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6817 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6818 bp->common.flash_size, bp->common.flash_size);
6820 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6821 bp->link_params.shmem_base = bp->common.shmem_base;
6822 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6824 if (!bp->common.shmem_base ||
6825 (bp->common.shmem_base < 0xA0000) ||
6826 (bp->common.shmem_base >= 0xC0000)) {
6827 BNX2X_DEV_INFO("MCP not active\n");
6828 bp->flags |= NO_MCP_FLAG;
6832 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6833 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6834 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6835 BNX2X_ERR("BAD MCP validity signature\n");
6837 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6838 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6840 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6841 bp->common.hw_config, bp->common.board);
6843 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6844 SHARED_HW_CFG_LED_MODE_MASK) >>
6845 SHARED_HW_CFG_LED_MODE_SHIFT);
6847 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6848 bp->common.bc_ver = val;
6849 BNX2X_DEV_INFO("bc_ver %X\n", val);
6850 if (val < BNX2X_BC_VER) {
6851 /* for now only warn;
6852 * later we might need to enforce this */
6853 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6854 " please upgrade BC\n", BNX2X_BC_VER, val);
6856 BNX2X_DEV_INFO("%sWoL Capable\n",
6857 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6859 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6860 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6861 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6862 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6864 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6865 val, val2, val3, val4);
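/*
 * Worked example (illustrative defines, not the driver's real macros):
 * the chip_id assembled above can be decomposed with shifts matching
 * the layout in the comment (num:16-31, rev:12-15, metal:4-11,
 * bond_id:0-3).
 */
#define EXAMPLE_CHIP_NUM(id)		(((id) >> 16) & 0xffff)
#define EXAMPLE_CHIP_REV(id)		(((id) >> 12) & 0xf)
#define EXAMPLE_CHIP_METAL(id)		(((id) >> 4) & 0xff)
#define EXAMPLE_CHIP_BOND_ID(id)	((id) & 0xf)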
6868 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6871 int port = BP_PORT(bp);
6874 switch (switch_cfg) {
6876 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6879 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6880 switch (ext_phy_type) {
6881 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6882 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6885 bp->port.supported |= (SUPPORTED_10baseT_Half |
6886 SUPPORTED_10baseT_Full |
6887 SUPPORTED_100baseT_Half |
6888 SUPPORTED_100baseT_Full |
6889 SUPPORTED_1000baseT_Full |
6890 SUPPORTED_2500baseX_Full |
6895 SUPPORTED_Asym_Pause);
6898 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6899 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6902 bp->port.supported |= (SUPPORTED_10baseT_Half |
6903 SUPPORTED_10baseT_Full |
6904 SUPPORTED_100baseT_Half |
6905 SUPPORTED_100baseT_Full |
6906 SUPPORTED_1000baseT_Full |
6911 SUPPORTED_Asym_Pause);
6915 BNX2X_ERR("NVRAM config error. "
6916 "BAD SerDes ext_phy_config 0x%x\n",
6917 bp->link_params.ext_phy_config);
6921 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6923 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6926 case SWITCH_CFG_10G:
6927 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6930 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6931 switch (ext_phy_type) {
6932 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6933 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6936 bp->port.supported |= (SUPPORTED_10baseT_Half |
6937 SUPPORTED_10baseT_Full |
6938 SUPPORTED_100baseT_Half |
6939 SUPPORTED_100baseT_Full |
6940 SUPPORTED_1000baseT_Full |
6941 SUPPORTED_2500baseX_Full |
6942 SUPPORTED_10000baseT_Full |
6947 SUPPORTED_Asym_Pause);
6950 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6951 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6954 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6957 SUPPORTED_Asym_Pause);
6960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6961 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6964 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6965 SUPPORTED_1000baseT_Full |
6968 SUPPORTED_Asym_Pause);
6971 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6972 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6975 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6976 SUPPORTED_1000baseT_Full |
6980 SUPPORTED_Asym_Pause);
6983 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6984 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6987 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6988 SUPPORTED_2500baseX_Full |
6989 SUPPORTED_1000baseT_Full |
6993 SUPPORTED_Asym_Pause);
6996 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6997 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7000 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7004 SUPPORTED_Asym_Pause);
7007 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7008 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7009 bp->link_params.ext_phy_config);
7013 BNX2X_ERR("NVRAM config error. "
7014 "BAD XGXS ext_phy_config 0x%x\n",
7015 bp->link_params.ext_phy_config);
7019 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7021 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7026 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7027 bp->port.link_config);
7030 bp->link_params.phy_addr = bp->port.phy_addr;
7032 /* mask what we support according to speed_cap_mask */
7033 if (!(bp->link_params.speed_cap_mask &
7034 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7035 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7037 if (!(bp->link_params.speed_cap_mask &
7038 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7039 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7041 if (!(bp->link_params.speed_cap_mask &
7042 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7043 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7045 if (!(bp->link_params.speed_cap_mask &
7046 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7047 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7049 if (!(bp->link_params.speed_cap_mask &
7050 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7051 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7052 SUPPORTED_1000baseT_Full);
7054 if (!(bp->link_params.speed_cap_mask &
7055 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7056 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7058 if (!(bp->link_params.speed_cap_mask &
7059 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7060 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7062 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
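/*
 * The if-chain above is effectively a capability table: each NVRAM
 * speed_cap_mask bit guards one or two SUPPORTED_* flags. A sketch of
 * the equivalent table-driven form (illustrative, not driver code):
 */
static const struct {
	u32 cap_bit;	/* PORT_HW_CFG_SPEED_CAPABILITY_D0_* bit */
	u32 supported;	/* SUPPORTED_* flags it keeps enabled */
} example_speed_caps[] = {
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF, SUPPORTED_10baseT_Half },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL, SUPPORTED_10baseT_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G,
	  SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G, SUPPORTED_2500baseX_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G, SUPPORTED_10000baseT_Full },
	/* the 100M half/full rows follow the same pattern */
};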
7065 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7067 bp->link_params.req_duplex = DUPLEX_FULL;
7069 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7070 case PORT_FEATURE_LINK_SPEED_AUTO:
7071 if (bp->port.supported & SUPPORTED_Autoneg) {
7072 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7073 bp->port.advertising = bp->port.supported;
7076 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7078 if ((ext_phy_type ==
7079 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7081 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7082 /* force 10G, no AN */
7083 bp->link_params.req_line_speed = SPEED_10000;
7084 bp->port.advertising =
7085 (ADVERTISED_10000baseT_Full |
7089 BNX2X_ERR("NVRAM config error. "
7090 "Invalid link_config 0x%x"
7091 " Autoneg not supported\n",
7092 bp->port.link_config);
7097 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7098 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7099 bp->link_params.req_line_speed = SPEED_10;
7100 bp->port.advertising = (ADVERTISED_10baseT_Full |
7103 BNX2X_ERR("NVRAM config error. "
7104 "Invalid link_config 0x%x"
7105 " speed_cap_mask 0x%x\n",
7106 bp->port.link_config,
7107 bp->link_params.speed_cap_mask);
7112 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7113 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7114 bp->link_params.req_line_speed = SPEED_10;
7115 bp->link_params.req_duplex = DUPLEX_HALF;
7116 bp->port.advertising = (ADVERTISED_10baseT_Half |
7119 BNX2X_ERR("NVRAM config error. "
7120 "Invalid link_config 0x%x"
7121 " speed_cap_mask 0x%x\n",
7122 bp->port.link_config,
7123 bp->link_params.speed_cap_mask);
7128 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7129 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7130 bp->link_params.req_line_speed = SPEED_100;
7131 bp->port.advertising = (ADVERTISED_100baseT_Full |
7134 BNX2X_ERR("NVRAM config error. "
7135 "Invalid link_config 0x%x"
7136 " speed_cap_mask 0x%x\n",
7137 bp->port.link_config,
7138 bp->link_params.speed_cap_mask);
7143 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7144 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7145 bp->link_params.req_line_speed = SPEED_100;
7146 bp->link_params.req_duplex = DUPLEX_HALF;
7147 bp->port.advertising = (ADVERTISED_100baseT_Half |
7150 BNX2X_ERR("NVRAM config error. "
7151 "Invalid link_config 0x%x"
7152 " speed_cap_mask 0x%x\n",
7153 bp->port.link_config,
7154 bp->link_params.speed_cap_mask);
7159 case PORT_FEATURE_LINK_SPEED_1G:
7160 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7161 bp->link_params.req_line_speed = SPEED_1000;
7162 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7165 BNX2X_ERR("NVRAM config error. "
7166 "Invalid link_config 0x%x"
7167 " speed_cap_mask 0x%x\n",
7168 bp->port.link_config,
7169 bp->link_params.speed_cap_mask);
7174 case PORT_FEATURE_LINK_SPEED_2_5G:
7175 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7176 bp->link_params.req_line_speed = SPEED_2500;
7177 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7180 BNX2X_ERR("NVRAM config error. "
7181 "Invalid link_config 0x%x"
7182 " speed_cap_mask 0x%x\n",
7183 bp->port.link_config,
7184 bp->link_params.speed_cap_mask);
7189 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7190 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7191 case PORT_FEATURE_LINK_SPEED_10G_KR:
7192 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7193 bp->link_params.req_line_speed = SPEED_10000;
7194 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7197 BNX2X_ERR("NVRAM config error. "
7198 "Invalid link_config 0x%x"
7199 " speed_cap_mask 0x%x\n",
7200 bp->port.link_config,
7201 bp->link_params.speed_cap_mask);
7207 BNX2X_ERR("NVRAM config error. "
7208 "BAD link speed link_config 0x%x\n",
7209 bp->port.link_config);
7210 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7211 bp->port.advertising = bp->port.supported;
7215 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7216 PORT_FEATURE_FLOW_CONTROL_MASK);
7217 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7218 !(bp->port.supported & SUPPORTED_Autoneg))
7219 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7221 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7222 " advertising 0x%x\n",
7223 bp->link_params.req_line_speed,
7224 bp->link_params.req_duplex,
7225 bp->link_params.req_flow_ctrl, bp->port.advertising);
7228 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7230 int port = BP_PORT(bp);
7233 bp->link_params.bp = bp;
7234 bp->link_params.port = port;
7236 bp->link_params.serdes_config =
7237 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7238 bp->link_params.lane_config =
7239 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7240 bp->link_params.ext_phy_config =
7242 dev_info.port_hw_config[port].external_phy_config);
7243 bp->link_params.speed_cap_mask =
7245 dev_info.port_hw_config[port].speed_capability_mask);
7247 bp->port.link_config =
7248 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7250 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7251 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7252 " link_config 0x%08x\n",
7253 bp->link_params.serdes_config,
7254 bp->link_params.lane_config,
7255 bp->link_params.ext_phy_config,
7256 bp->link_params.speed_cap_mask, bp->port.link_config);
7258 bp->link_params.switch_cfg = (bp->port.link_config &
7259 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7260 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7262 bnx2x_link_settings_requested(bp);
7264 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7265 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7266 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7267 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7268 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7269 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7270 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7271 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7272 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7273 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
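/*
 * Illustrative helper (not part of the driver): the shmem MAC layout
 * used above - mac_upper holds bytes 0-1 in its low 16 bits, mac_lower
 * holds bytes 2-5 - unpacked into a conventional byte array.
 */
static inline void example_unpack_shmem_mac(u32 upper, u32 lower, u8 *mac)
{
	mac[0] = (u8)(upper >> 8);
	mac[1] = (u8)upper;
	mac[2] = (u8)(lower >> 24);
	mac[3] = (u8)(lower >> 16);
	mac[4] = (u8)(lower >> 8);
	mac[5] = (u8)lower;
}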
7276 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7278 int func = BP_FUNC(bp);
7282 bnx2x_get_common_hwinfo(bp);
7286 if (CHIP_IS_E1H(bp)) {
7288 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7291 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7292 FUNC_MF_CFG_E1HOV_TAG_MASK);
7293 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7297 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7299 func, bp->e1hov, bp->e1hov);
7301 BNX2X_DEV_INFO("Single function mode\n");
7303 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7304 " aborting\n", func);
7310 if (!BP_NOMCP(bp)) {
7311 bnx2x_get_port_hwinfo(bp);
7313 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7314 DRV_MSG_SEQ_NUMBER_MASK);
7315 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7319 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7320 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7321 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7322 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7323 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7324 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7325 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7326 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7327 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7328 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7329 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7331 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7339 /* only supposed to happen on emulation/FPGA */
7340 BNX2X_ERR("warning: random MAC workaround active\n");
7341 random_ether_addr(bp->dev->dev_addr);
7342 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7348 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7350 int func = BP_FUNC(bp);
7353 mutex_init(&bp->port.phy_mutex);
7355 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7356 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7358 rc = bnx2x_get_hwinfo(bp);
7360 /* need to reset chip if undi was active */
7362 bnx2x_undi_unload(bp);
7364 if (CHIP_REV_IS_FPGA(bp))
7365 printk(KERN_ERR PFX "FPGA detected\n");
7367 if (BP_NOMCP(bp) && (func == 0))
7369 "MCP disabled, must load devices in order!\n");
7373 bp->flags &= ~TPA_ENABLE_FLAG;
7374 bp->dev->features &= ~NETIF_F_LRO;
7376 bp->flags |= TPA_ENABLE_FLAG;
7377 bp->dev->features |= NETIF_F_LRO;
7381 bp->tx_ring_size = MAX_TX_AVAIL;
7382 bp->rx_ring_size = MAX_RX_AVAIL;
7390 bp->stats_ticks = 1000000 & 0xffff00;
7392 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7393 bp->current_interval = (poll ? poll : bp->timer_interval);
7395 init_timer(&bp->timer);
7396 bp->timer.expires = jiffies + bp->current_interval;
7397 bp->timer.data = (unsigned long) bp;
7398 bp->timer.function = bnx2x_timer;
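/*
 * Usage sketch for the timer initialized above (classic pre-timer_setup
 * API): arm or re-arm with mod_timer(), stop with del_timer_sync(), as
 * bnx2x_nic_load()/bnx2x_nic_unload() do. Illustrative only.
 */
static void example_timer_usage(struct bnx2x *bp)
{
	/* arm: bnx2x_timer(bp->timer.data) fires after current_interval */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	/* ... on teardown, wait out a concurrently running handler */
	del_timer_sync(&bp->timer);
}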
7404 * ethtool service functions
7407 /* All ethtool functions called with rtnl_lock */
7409 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7411 struct bnx2x *bp = netdev_priv(dev);
7413 cmd->supported = bp->port.supported;
7414 cmd->advertising = bp->port.advertising;
7416 if (netif_carrier_ok(dev)) {
7417 cmd->speed = bp->link_vars.line_speed;
7418 cmd->duplex = bp->link_vars.duplex;
7420 cmd->speed = bp->link_params.req_line_speed;
7421 cmd->duplex = bp->link_params.req_duplex;
7426 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7427 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7428 if (vn_max_rate < cmd->speed)
7429 cmd->speed = vn_max_rate;
7432 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7434 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7436 switch (ext_phy_type) {
7437 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7438 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7440 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7441 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7442 cmd->port = PORT_FIBRE;
7445 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7446 cmd->port = PORT_TP;
7449 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7450 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7451 bp->link_params.ext_phy_config);
7455 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7456 bp->link_params.ext_phy_config);
7460 cmd->port = PORT_TP;
7462 cmd->phy_address = bp->port.phy_addr;
7463 cmd->transceiver = XCVR_INTERNAL;
7465 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7466 cmd->autoneg = AUTONEG_ENABLE;
7468 cmd->autoneg = AUTONEG_DISABLE;
7473 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7474 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7475 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7476 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7477 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7478 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7479 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7484 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7486 struct bnx2x *bp = netdev_priv(dev);
7492 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7493 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7494 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7495 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7496 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7497 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7498 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7500 if (cmd->autoneg == AUTONEG_ENABLE) {
7501 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7502 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7506 /* advertise the requested speed and duplex if supported */
7507 cmd->advertising &= bp->port.supported;
7509 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7510 bp->link_params.req_duplex = DUPLEX_FULL;
7511 bp->port.advertising |= (ADVERTISED_Autoneg |
7514 } else { /* forced speed */
7515 /* advertise the requested speed and duplex if supported */
7516 switch (cmd->speed) {
7518 if (cmd->duplex == DUPLEX_FULL) {
7519 if (!(bp->port.supported &
7520 SUPPORTED_10baseT_Full)) {
7522 "10M full not supported\n");
7526 advertising = (ADVERTISED_10baseT_Full |
7529 if (!(bp->port.supported &
7530 SUPPORTED_10baseT_Half)) {
7532 "10M half not supported\n");
7536 advertising = (ADVERTISED_10baseT_Half |
7542 if (cmd->duplex == DUPLEX_FULL) {
7543 if (!(bp->port.supported &
7544 SUPPORTED_100baseT_Full)) {
7546 "100M full not supported\n");
7550 advertising = (ADVERTISED_100baseT_Full |
7553 if (!(bp->port.supported &
7554 SUPPORTED_100baseT_Half)) {
7556 "100M half not supported\n");
7560 advertising = (ADVERTISED_100baseT_Half |
7566 if (cmd->duplex != DUPLEX_FULL) {
7567 DP(NETIF_MSG_LINK, "1G half not supported\n");
7571 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7572 DP(NETIF_MSG_LINK, "1G full not supported\n");
7576 advertising = (ADVERTISED_1000baseT_Full |
7581 if (cmd->duplex != DUPLEX_FULL) {
7583 "2.5G half not supported\n");
7587 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7589 "2.5G full not supported\n");
7593 advertising = (ADVERTISED_2500baseX_Full |
7598 if (cmd->duplex != DUPLEX_FULL) {
7599 DP(NETIF_MSG_LINK, "10G half not supported\n");
7603 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7604 DP(NETIF_MSG_LINK, "10G full not supported\n");
7608 advertising = (ADVERTISED_10000baseT_Full |
7613 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7617 bp->link_params.req_line_speed = cmd->speed;
7618 bp->link_params.req_duplex = cmd->duplex;
7619 bp->port.advertising = advertising;
7622 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7623 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7624 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7625 bp->port.advertising);
7627 if (netif_running(dev)) {
7628 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7635 #define PHY_FW_VER_LEN 10
7637 static void bnx2x_get_drvinfo(struct net_device *dev,
7638 struct ethtool_drvinfo *info)
7640 struct bnx2x *bp = netdev_priv(dev);
7641 char phy_fw_ver[PHY_FW_VER_LEN];
7643 strcpy(info->driver, DRV_MODULE_NAME);
7644 strcpy(info->version, DRV_MODULE_VERSION);
7646 phy_fw_ver[0] = '\0';
7648 bnx2x_phy_hw_lock(bp);
7649 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7650 (bp->state != BNX2X_STATE_CLOSED),
7651 phy_fw_ver, PHY_FW_VER_LEN);
7652 bnx2x_phy_hw_unlock(bp);
7655 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7656 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7657 BCM_5710_FW_REVISION_VERSION,
7658 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7659 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7660 strcpy(info->bus_info, pci_name(bp->pdev));
7661 info->n_stats = BNX2X_NUM_STATS;
7662 info->testinfo_len = BNX2X_NUM_TESTS;
7663 info->eedump_len = bp->common.flash_size;
7664 info->regdump_len = 0;
7667 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7669 struct bnx2x *bp = netdev_priv(dev);
7671 if (bp->flags & NO_WOL_FLAG) {
7675 wol->supported = WAKE_MAGIC;
7677 wol->wolopts = WAKE_MAGIC;
7681 memset(&wol->sopass, 0, sizeof(wol->sopass));
7684 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7686 struct bnx2x *bp = netdev_priv(dev);
7688 if (wol->wolopts & ~WAKE_MAGIC)
7691 if (wol->wolopts & WAKE_MAGIC) {
7692 if (bp->flags & NO_WOL_FLAG)
7702 static u32 bnx2x_get_msglevel(struct net_device *dev)
7704 struct bnx2x *bp = netdev_priv(dev);
7706 return bp->msglevel;
7709 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7711 struct bnx2x *bp = netdev_priv(dev);
7713 if (capable(CAP_NET_ADMIN))
7714 bp->msglevel = level;
7717 static int bnx2x_nway_reset(struct net_device *dev)
7719 struct bnx2x *bp = netdev_priv(dev);
7724 if (netif_running(dev)) {
7725 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7732 static int bnx2x_get_eeprom_len(struct net_device *dev)
7734 struct bnx2x *bp = netdev_priv(dev);
7736 return bp->common.flash_size;
7739 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7741 int port = BP_PORT(bp);
7745 /* adjust timeout for emulation/FPGA */
7746 count = NVRAM_TIMEOUT_COUNT;
7747 if (CHIP_REV_IS_SLOW(bp))
7750 /* request access to nvram interface */
7751 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7752 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7754 for (i = 0; i < count*10; i++) {
7755 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7756 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7762 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7763 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7770 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7772 int port = BP_PORT(bp);
7776 /* adjust timeout for emulation/FPGA */
7777 count = NVRAM_TIMEOUT_COUNT;
7778 if (CHIP_REV_IS_SLOW(bp))
7781 /* relinquish nvram interface */
7782 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7783 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7785 for (i = 0; i < count*10; i++) {
7786 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7787 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7793 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7794 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7801 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7805 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7807 /* enable both bits, even on read */
7808 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7809 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7810 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7813 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7817 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7819 /* disable both bits, even after read */
7820 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7821 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7822 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7825 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7831 /* build the command word */
7832 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7834 /* need to clear DONE bit separately */
7835 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7837 /* address of the NVRAM to read from */
7838 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7839 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7841 /* issue a read command */
7842 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7844 /* adjust timeout for emulation/FPGA */
7845 count = NVRAM_TIMEOUT_COUNT;
7846 if (CHIP_REV_IS_SLOW(bp))
7849 /* wait for completion */
7852 for (i = 0; i < count; i++) {
7854 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7856 if (val & MCPR_NVM_COMMAND_DONE) {
7857 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7858 /* we read nvram data in cpu order,
7859 * but ethtool sees it as an array of bytes;
7860 * converting to big-endian will do the work */
7861 val = cpu_to_be32(val);
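/*
 * Byte-order example for the conversion above (illustrative): if NVRAM
 * holds the bytes aa bb cc dd, the register read yields 0xaabbccdd in
 * CPU order, which a plain memcpy() on little-endian would emit as
 * dd cc bb aa. Storing it big-endian restores the on-flash byte order
 * that ethtool user space expects.
 */
static inline void example_nvram_to_bytes(u32 cpu_val, u8 *out)
{
	__be32 wire = cpu_to_be32(cpu_val);	/* byte 0 becomes the MSB */

	memcpy(out, &wire, sizeof(wire));	/* out[] = aa bb cc dd */
}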
7871 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7878 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7880 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7885 if (offset + buf_size > bp->common.flash_size) {
7886 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7887 " buf_size (0x%x) > flash_size (0x%x)\n",
7888 offset, buf_size, bp->common.flash_size);
7892 /* request access to nvram interface */
7893 rc = bnx2x_acquire_nvram_lock(bp);
7897 /* enable access to nvram interface */
7898 bnx2x_enable_nvram_access(bp);
7900 /* read the first word(s) */
7901 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7902 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7903 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7904 memcpy(ret_buf, &val, 4);
7906 /* advance to the next dword */
7907 offset += sizeof(u32);
7908 ret_buf += sizeof(u32);
7909 buf_size -= sizeof(u32);
7914 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7915 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7916 memcpy(ret_buf, &val, 4);
7919 /* disable access to nvram interface */
7920 bnx2x_disable_nvram_access(bp);
7921 bnx2x_release_nvram_lock(bp);
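/*
 * Usage sketch (illustrative): reading a dword-aligned region through
 * the helper above. Offset and length must be 4-byte aligned and fit
 * within flash_size, or the helper fails its parameter checks. The
 * offset 0x100 here is just an example value.
 */
static int example_read_nvram_region(struct bnx2x *bp)
{
	u8 buf[16];
	int rc;

	rc = bnx2x_nvram_read(bp, 0x100, buf, sizeof(buf));
	if (rc)
		BNX2X_ERR("example nvram read failed (rc %d)\n", rc);
	return rc;
}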
7926 static int bnx2x_get_eeprom(struct net_device *dev,
7927 struct ethtool_eeprom *eeprom, u8 *eebuf)
7929 struct bnx2x *bp = netdev_priv(dev);
7932 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7933 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7934 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7935 eeprom->len, eeprom->len);
7937 /* parameters already validated in ethtool_get_eeprom */
7939 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7944 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7949 /* build the command word */
7950 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7952 /* need to clear DONE bit separately */
7953 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7955 /* write the data */
7956 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7958 /* address of the NVRAM to write to */
7959 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7960 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7962 /* issue the write command */
7963 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7965 /* adjust timeout for emulation/FPGA */
7966 count = NVRAM_TIMEOUT_COUNT;
7967 if (CHIP_REV_IS_SLOW(bp))
7970 /* wait for completion */
7972 for (i = 0; i < count; i++) {
7974 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7975 if (val & MCPR_NVM_COMMAND_DONE) {
7984 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
7986 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7994 if (offset + buf_size > bp->common.flash_size) {
7995 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7996 " buf_size (0x%x) > flash_size (0x%x)\n",
7997 offset, buf_size, bp->common.flash_size);
8001 /* request access to nvram interface */
8002 rc = bnx2x_acquire_nvram_lock(bp);
8006 /* enable access to nvram interface */
8007 bnx2x_enable_nvram_access(bp);
8009 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8010 align_offset = (offset & ~0x03);
8011 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8014 val &= ~(0xff << BYTE_OFFSET(offset));
8015 val |= (*data_buf << BYTE_OFFSET(offset));
8017 /* nvram data is returned as an array of bytes;
8018 * convert it back to cpu order */
8019 val = be32_to_cpu(val);
8021 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8025 /* disable access to nvram interface */
8026 bnx2x_disable_nvram_access(bp);
8027 bnx2x_release_nvram_lock(bp);
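/*
 * Worked example of the read-modify-write above (illustrative): to
 * write one byte at offset 0x102, the aligned dword at 0x100 is read,
 * bits BYTE_OFFSET(0x102) = 16..23 are cleared, the new byte is OR-ed
 * in, and the dword is written back with FIRST|LAST in one command.
 */
static inline u32 example_patch_byte(u32 dword, u32 offset, u8 byte)
{
	dword &= ~(0xff << BYTE_OFFSET(offset));	/* clear old byte */
	dword |= ((u32)byte << BYTE_OFFSET(offset));	/* insert new one */
	return dword;
}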
8032 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8040 if (buf_size == 1) /* ethtool */
8041 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8043 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8045 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8050 if (offset + buf_size > bp->common.flash_size) {
8051 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8052 " buf_size (0x%x) > flash_size (0x%x)\n",
8053 offset, buf_size, bp->common.flash_size);
8057 /* request access to nvram interface */
8058 rc = bnx2x_acquire_nvram_lock(bp);
8062 /* enable access to nvram interface */
8063 bnx2x_enable_nvram_access(bp);
8066 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8067 while ((written_so_far < buf_size) && (rc == 0)) {
8068 if (written_so_far == (buf_size - sizeof(u32)))
8069 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8070 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8071 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8072 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8073 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8075 memcpy(&val, data_buf, 4);
8077 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8079 /* advance to the next dword */
8080 offset += sizeof(u32);
8081 data_buf += sizeof(u32);
8082 written_so_far += sizeof(u32);
8086 /* disable access to nvram interface */
8087 bnx2x_disable_nvram_access(bp);
8088 bnx2x_release_nvram_lock(bp);
8093 static int bnx2x_set_eeprom(struct net_device *dev,
8094 struct ethtool_eeprom *eeprom, u8 *eebuf)
8096 struct bnx2x *bp = netdev_priv(dev);
8099 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8100 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8101 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8102 eeprom->len, eeprom->len);
8104 /* parameters already validated in ethtool_set_eeprom */
8106 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8107 if (eeprom->magic == 0x00504859)
8110 bnx2x_phy_hw_lock(bp);
8111 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8112 bp->link_params.ext_phy_config,
8113 (bp->state != BNX2X_STATE_CLOSED),
8114 eebuf, eeprom->len);
8115 if ((bp->state == BNX2X_STATE_OPEN) ||
8116 (bp->state == BNX2X_STATE_DISABLED)) {
8117 rc |= bnx2x_link_reset(&bp->link_params,
8119 rc |= bnx2x_phy_init(&bp->link_params,
8122 bnx2x_phy_hw_unlock(bp);
8124 } else /* Only the PMF can access the PHY */
8127 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8132 static int bnx2x_get_coalesce(struct net_device *dev,
8133 struct ethtool_coalesce *coal)
8135 struct bnx2x *bp = netdev_priv(dev);
8137 memset(coal, 0, sizeof(struct ethtool_coalesce));
8139 coal->rx_coalesce_usecs = bp->rx_ticks;
8140 coal->tx_coalesce_usecs = bp->tx_ticks;
8141 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8146 static int bnx2x_set_coalesce(struct net_device *dev,
8147 struct ethtool_coalesce *coal)
8149 struct bnx2x *bp = netdev_priv(dev);
8151 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8152 if (bp->rx_ticks > 3000)
8153 bp->rx_ticks = 3000;
8155 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8156 if (bp->tx_ticks > 0x3000)
8157 bp->tx_ticks = 0x3000;
8159 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8160 if (bp->stats_ticks > 0xffff00)
8161 bp->stats_ticks = 0xffff00;
8162 bp->stats_ticks &= 0xffff00;
8164 if (netif_running(dev))
8165 bnx2x_update_coalesce(bp);
8170 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8172 struct bnx2x *bp = netdev_priv(dev);
8176 if (data & ETH_FLAG_LRO) {
8177 if (!(dev->features & NETIF_F_LRO)) {
8178 dev->features |= NETIF_F_LRO;
8179 bp->flags |= TPA_ENABLE_FLAG;
8183 } else if (dev->features & NETIF_F_LRO) {
8184 dev->features &= ~NETIF_F_LRO;
8185 bp->flags &= ~TPA_ENABLE_FLAG;
8189 if (changed && netif_running(dev)) {
8190 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8191 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8197 static void bnx2x_get_ringparam(struct net_device *dev,
8198 struct ethtool_ringparam *ering)
8200 struct bnx2x *bp = netdev_priv(dev);
8202 ering->rx_max_pending = MAX_RX_AVAIL;
8203 ering->rx_mini_max_pending = 0;
8204 ering->rx_jumbo_max_pending = 0;
8206 ering->rx_pending = bp->rx_ring_size;
8207 ering->rx_mini_pending = 0;
8208 ering->rx_jumbo_pending = 0;
8210 ering->tx_max_pending = MAX_TX_AVAIL;
8211 ering->tx_pending = bp->tx_ring_size;
8214 static int bnx2x_set_ringparam(struct net_device *dev,
8215 struct ethtool_ringparam *ering)
8217 struct bnx2x *bp = netdev_priv(dev);
8220 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8221 (ering->tx_pending > MAX_TX_AVAIL) ||
8222 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8225 bp->rx_ring_size = ering->rx_pending;
8226 bp->tx_ring_size = ering->tx_pending;
8228 if (netif_running(dev)) {
8229 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8230 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8236 static void bnx2x_get_pauseparam(struct net_device *dev,
8237 struct ethtool_pauseparam *epause)
8239 struct bnx2x *bp = netdev_priv(dev);
8241 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8242 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8244 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8246 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8249 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8250 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8251 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8254 static int bnx2x_set_pauseparam(struct net_device *dev,
8255 struct ethtool_pauseparam *epause)
8257 struct bnx2x *bp = netdev_priv(dev);
8262 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8263 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8264 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8266 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8268 if (epause->rx_pause)
8269 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8271 if (epause->tx_pause)
8272 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8274 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8275 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8277 if (epause->autoneg) {
8278 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8279 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8283 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8284 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8288 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8290 if (netif_running(dev)) {
8291 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8298 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8300 struct bnx2x *bp = netdev_priv(dev);
8305 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8307 struct bnx2x *bp = netdev_priv(dev);
8313 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8316 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8317 dev->features |= NETIF_F_TSO6;
8319 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8320 dev->features &= ~NETIF_F_TSO6;
8326 static const struct {
8327 char string[ETH_GSTRING_LEN];
8328 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8329 { "register_test (offline)" },
8330 { "memory_test (offline)" },
8331 { "loopback_test (offline)" },
8332 { "nvram_test (online)" },
8333 { "interrupt_test (online)" },
8334 { "link_test (online)" },
8335 { "idle check (online)" },
8336 { "MC errors (online)" }
8339 static int bnx2x_self_test_count(struct net_device *dev)
8341 return BNX2X_NUM_TESTS;
8344 static int bnx2x_test_registers(struct bnx2x *bp)
8346 int idx, i, rc = -ENODEV;
8348 static const struct {
8353 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8354 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8355 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8356 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8357 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8358 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8359 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8360 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8361 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8362 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8363 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8364 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8365 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8366 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8367 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8368 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8369 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8370 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8371 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8372 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8373 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8374 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8375 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8376 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8377 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8378 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8379 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8380 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8381 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8382 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8383 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8384 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8385 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8386 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8387 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8388 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8389 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8390 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8392 { 0xffffffff, 0, 0x00000000 }
8395 if (!netif_running(bp->dev))
8398 /* Repeat the test twice:
8399 first by writing 0x00000000, then by writing 0xffffffff */
8400 for (idx = 0; idx < 2; idx++) {
8407 wr_val = 0xffffffff;
8411 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8412 u32 offset, mask, save_val, val;
8413 int port = BP_PORT(bp);
8415 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8416 mask = reg_tbl[i].mask;
8418 save_val = REG_RD(bp, offset);
8420 REG_WR(bp, offset, wr_val);
8421 val = REG_RD(bp, offset);
8423 /* Restore the original register's value */
8424 REG_WR(bp, offset, save_val);
8426 /* verify that the value is as expected */
8427 if ((val & mask) != (wr_val & mask))
8438 static int bnx2x_test_memory(struct bnx2x *bp)
8440 int i, j, rc = -ENODEV;
8442 static const struct {
8446 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8447 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8448 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8449 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8450 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8451 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8452 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8456 static const struct {
8461 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8462 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8463 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8464 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8465 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8466 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8468 { NULL, 0xffffffff, 0 }
8471 if (!netif_running(bp->dev))
8474 /* Go through all the memories */
8475 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8476 for (j = 0; j < mem_tbl[i].size; j++)
8477 REG_RD(bp, mem_tbl[i].offset + j*4);
8479 /* Check the parity status */
8480 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8481 val = REG_RD(bp, prty_tbl[i].offset);
8482 if (val & ~(prty_tbl[i].mask)) {
8484 "%s is 0x%x\n", prty_tbl[i].name, val);
8495 static void bnx2x_netif_start(struct bnx2x *bp)
8499 if (atomic_dec_and_test(&bp->intr_sem)) {
8500 if (netif_running(bp->dev)) {
8501 bnx2x_int_enable(bp);
8502 for_each_queue(bp, i)
8503 napi_enable(&bnx2x_fp(bp, i, napi));
8504 if (bp->state == BNX2X_STATE_OPEN)
8505 netif_wake_queue(bp->dev);
8510 static void bnx2x_netif_stop(struct bnx2x *bp)
8514 if (netif_running(bp->dev)) {
8515 netif_tx_disable(bp->dev);
8516 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8517 for_each_queue(bp, i)
8518 napi_disable(&bnx2x_fp(bp, i, napi));
8520 bnx2x_int_disable_sync(bp);
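/*
 * The start/stop pair above relies on the intr_sem gate that the load
 * path also uses: interrupts are blocked by raising the count before
 * reconfiguring hardware and re-enabled by dropping it back to zero
 * (bnx2x_netif_start() does this via atomic_dec_and_test()). A minimal
 * sketch of the convention, illustrative only:
 */
static void example_intr_gate(struct bnx2x *bp)
{
	atomic_set(&bp->intr_sem, 1);	/* ISR fastpath backs off */
	/* ... reconfigure the hardware ... */
	atomic_set(&bp->intr_sem, 0);	/* ISR may handle events again */
}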
8523 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8528 while (bnx2x_link_test(bp) && cnt--)
8532 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8534 unsigned int pkt_size, num_pkts, i;
8535 struct sk_buff *skb;
8536 unsigned char *packet;
8537 struct bnx2x_fastpath *fp = &bp->fp[0];
8538 u16 tx_start_idx, tx_idx;
8539 u16 rx_start_idx, rx_idx;
8541 struct sw_tx_bd *tx_buf;
8542 struct eth_tx_bd *tx_bd;
8544 union eth_rx_cqe *cqe;
8546 struct sw_rx_bd *rx_buf;
8550 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8551 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8552 bnx2x_phy_hw_lock(bp);
8553 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8554 bnx2x_phy_hw_unlock(bp);
8556 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8557 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8558 bnx2x_phy_hw_lock(bp);
8559 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8560 bnx2x_phy_hw_unlock(bp);
8561 /* wait until link state is restored */
8562 bnx2x_wait_for_link(bp, link_up);
8568 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8571 goto test_loopback_exit;
8573 packet = skb_put(skb, pkt_size);
8574 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8575 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8576 for (i = ETH_HLEN; i < pkt_size; i++)
8577 packet[i] = (unsigned char) (i & 0xff);
8580 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8581 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8583 pkt_prod = fp->tx_pkt_prod++;
8584 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8585 tx_buf->first_bd = fp->tx_bd_prod;
8588 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8589 mapping = pci_map_single(bp->pdev, skb->data,
8590 skb_headlen(skb), PCI_DMA_TODEVICE);
8591 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8592 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8593 tx_bd->nbd = cpu_to_le16(1);
8594 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8595 tx_bd->vlan = cpu_to_le16(pkt_prod);
8596 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8597 ETH_TX_BD_FLAGS_END_BD);
8598 tx_bd->general_data = ((UNICAST_ADDRESS <<
8599 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8601 fp->hw_tx_prods->bds_prod =
8602 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8603 mb(); /* FW restriction: must not reorder writing nbd and packets */
8604 fp->hw_tx_prods->packets_prod =
8605 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8606 DOORBELL(bp, FP_IDX(fp), 0);
8612 bp->dev->trans_start = jiffies;
8616 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8617 if (tx_idx != tx_start_idx + num_pkts)
8618 goto test_loopback_exit;
8620 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8621 if (rx_idx != rx_start_idx + num_pkts)
8622 goto test_loopback_exit;
8624 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8625 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8626 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8627 goto test_loopback_rx_exit;
8629 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8630 if (len != pkt_size)
8631 goto test_loopback_rx_exit;
8633 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8635 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8636 for (i = ETH_HLEN; i < pkt_size; i++)
8637 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8638 goto test_loopback_rx_exit;
8642 test_loopback_rx_exit:
8643 bp->dev->last_rx = jiffies;
8645 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8646 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8647 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8648 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8650 /* Update producers */
8651 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8653 mmiowb(); /* keep prod updates ordered */
8656 bp->link_params.loopback_mode = LOOPBACK_NONE;
8661 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8665 if (!netif_running(bp->dev))
8666 return BNX2X_LOOPBACK_FAILED;
8668 bnx2x_netif_stop(bp);
8670 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8671 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8672 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8675 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8676 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8677 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8680 bnx2x_netif_start(bp);
8685 #define CRC32_RESIDUAL 0xdebb20e3
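/*
 * 0xdebb20e3 is the standard CRC-32 residue: when a region stores its
 * data followed by the (bit-inverted, little-endian) CRC of that data,
 * the CRC computed over data-plus-checksum is always this constant.
 * That lets bnx2x_test_nvram() below validate each region without
 * knowing where inside it the checksum lives. Illustrative check,
 * assuming len covers the stored checksum as well:
 */
static inline int example_region_csum_ok(const u8 *data, int len)
{
	return ether_crc_le(len, data) == CRC32_RESIDUAL;
}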
8687 static int bnx2x_test_nvram(struct bnx2x *bp)
8689 static const struct {
8693 { 0, 0x14 }, /* bootstrap */
8694 { 0x14, 0xec }, /* dir */
8695 { 0x100, 0x350 }, /* manuf_info */
8696 { 0x450, 0xf0 }, /* feature_info */
8697 { 0x640, 0x64 }, /* upgrade_key_info */
8699 { 0x708, 0x70 }, /* manuf_key_info */
8704 u8 *data = (u8 *)buf;
8708 rc = bnx2x_nvram_read(bp, 0, data, 4);
8710 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8711 goto test_nvram_exit;
8714 magic = be32_to_cpu(buf[0]);
8715 if (magic != 0x669955aa) {
8716 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8718 goto test_nvram_exit;
8721 for (i = 0; nvram_tbl[i].size; i++) {
8723 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8727 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8728 goto test_nvram_exit;
8731 csum = ether_crc_le(nvram_tbl[i].size, data);
8732 if (csum != CRC32_RESIDUAL) {
8734 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8736 goto test_nvram_exit;
8744 static int bnx2x_test_intr(struct bnx2x *bp)
8746 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8749 if (!netif_running(bp->dev))
8752 config->hdr.length_6b = 0;
8753 config->hdr.offset = 0;
8754 config->hdr.client_id = BP_CL_ID(bp);
8755 config->hdr.reserved1 = 0;
8757 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8758 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8759 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8761 bp->set_mac_pending++;
8762 for (i = 0; i < 10; i++) {
8763 if (!bp->set_mac_pending)
8765 msleep_interruptible(10);
8774 static void bnx2x_self_test(struct net_device *dev,
8775 struct ethtool_test *etest, u64 *buf)
8777 struct bnx2x *bp = netdev_priv(dev);
8779 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8781 if (!netif_running(dev))
8784 /* offline tests are not supported in MF mode */
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;

		if (bnx2x_test_memory(bp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;

		buf[2] = bnx2x_test_loopback(bp, link_up);
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

	if (bnx2x_test_nvram(bp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;

	if (bnx2x_test_intr(bp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;

	if (bnx2x_link_test(bp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;

	buf[7] = bnx2x_mc_assert(bp);
		etest->flags |= ETH_TEST_FL_FAILED;
#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
						8, 1, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
						8, 1, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, 1, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, 1, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
						8, 0, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
						8, 0, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
						8, 0, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
						8, 0, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
						8, 0, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
						8, 0, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
						8, 0, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
						8, 0, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
						8, 0, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
						8, 0, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
						8, 0, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
						8, 0, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
						4, 1, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
						8, 0, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
					8, 0, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
					8, 0, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
					8, 0, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
					8, 0, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
					8, 0, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
					8, 0, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
						8, 0, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
						8, 0, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
						8, 0, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" },
/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	switch (stringset) {
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);

		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */

		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);

		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
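
/* HILO_U64 above is assumed to splice an 8-byte counter back together
 * from the two 32-bit words the firmware keeps ("_hi" word first in
 * the stats block, low word immediately after).  A sketch of that
 * composition (example_hilo_u64() is illustrative, not driver code):
 */
#if 0
static inline u64 example_hilo_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}
#endif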
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);

	if (!netif_running(dev))

	for (i = 0; i < (data * 2); i++) {
		bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

		bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
/* end of ethtool_ops */
/****************************************************************************
* General service functions
****************************************************************************/
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;

			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
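
/* What the PMCSR writes above amount to, under the standard PCI-PM
 * register layout from <linux/pci_regs.h>: clear the power-state bits
 * to enter D0 (with a settle delay when coming out of D3hot), or set
 * them, plus PME enable when wake-on-LAN is armed, to enter D3hot.  A
 * minimal sketch of the D0 half only (pmcsr_sketch_d0() is a
 * hypothetical helper, not driver code):
 */
#if 0
static void pmcsr_sketch_d0(struct pci_dev *pdev, int pm_cap)
{
	u16 pmcsr;

	pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pmcsr);
	pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL,
			      pmcsr & ~PCI_PM_CTRL_STATE_MASK);
	if (pmcsr & PCI_PM_CTRL_STATE_MASK)	/* was in D3hot */
		msleep(20);			/* settle time out of D3hot */
}
#endif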
/****************************************************************************
* net_device service functions
****************************************************************************/
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
	struct bnx2x *bp = fp->bp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		bnx2x_tx_int(fp, budget);

	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* bnx2x_has_work() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !bnx2x_has_work(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
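
/* The NAPI contract as this poll routine applies it: do at most
 * "budget" units of RX work, and complete (re-enabling the status
 * block interrupt through the IGU ack above) only when less than the
 * full budget was consumed and no further work is pending.  A hedged
 * skeleton of that shape alone; do_rx_work(), more_work_pending() and
 * reenable_interrupts() are hypothetical stand-ins:
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = do_rx_work(budget);

	if (done < budget && !more_work_pending()) {
		netif_rx_complete(napi->dev, napi);
		reenable_interrupts();
	}
	return done;	/* returning budget means "poll me again" */
}
#endif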
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
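
/* The split boils down to two BDs sharing the one DMA mapping of the
 * linear data: the header BD keeps offset 0 with length hlen, and the
 * data BD starts hlen bytes in and carries the remainder.  Sketch
 * under those assumptions (example_split() is illustrative only; the
 * BD field names match the structures used above):
 */
#if 0
static void example_split(dma_addr_t mapping, u16 old_len, u16 hlen,
			  struct eth_tx_bd *h_bd, struct eth_tx_bd *d_bd)
{
	h_bd->nbytes = cpu_to_le16(hlen);		/* headers only */
	d_bd->addr_hi = cpu_to_le32(U64_HI(mapping + hlen));
	d_bd->addr_lo = cpu_to_le32(U64_LO(mapping + hlen));
	d_bd->nbytes = cpu_to_le16(old_len - hlen);	/* the payload */
}
#endif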
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
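
/* The arithmetic behind the fix: a ones'-complement sum is linear, so
 * a partial checksum taken from the wrong starting offset is corrected
 * by folding the sum of the skipped "fix" bytes out of (or into) it,
 * then byte-swapping for the parsing BD.  A sketch of the positive-fix
 * case only, using csum_partial()/csum_sub()/csum_fold() from
 * <net/checksum.h> (example_csum_shift() itself is hypothetical):
 */
#if 0
static u16 example_csum_shift(unsigned char *t_header, u16 csum, s8 fix)
{
	/* subtract the bytes that preceded the true start */
	return swab16((u16) ~csum_fold(csum_sub(csum,
			csum_partial(t_header - fix, fix, 0))));
}
#endif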
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)

	if (skb->protocol == ntohs(ETH_P_IPV6)) {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			rc |= XMIT_CSUM_TCP;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			rc |= XMIT_CSUM_TCP;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {

				wnd_sum -= first_bd_sz;

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
					skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {

					skb_shinfo(skb)->frags[wnd_idx].size;

		/* in non-LSO too fragmented packet should always

	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
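
/* The rule being enforced above: the controller fetches at most
 * MAX_FETCH_BD BDs per packet, so for LSO every window of wnd_size
 * consecutive frags must contribute at least one MSS of payload, or
 * the skb has to be linearized.  A hedged sketch of a single window
 * test (example_window_ok() is illustrative; frag sizes are read the
 * way this kernel generation stores them):
 */
#if 0
static int example_window_ok(const skb_frag_t *frags, int wnd_size,
			     unsigned short lso_mss)
{
	int frag_idx, wnd_sum = 0;

	for (frag_idx = 0; frag_idx < wnd_size; frag_idx++)
		wnd_sum += frags[frag_idx].size;
	return wnd_sum >= lso_mss;	/* 0 means "linearize" */
}
#endif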
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];
	if (unlikely(bnx2x_tx_avail(fp) <
		     (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	tx_bd->general_data |= 1; /* header nbd */

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;

		tx_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = (void *)&fp->tx_desc_ring[bd_prod];

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));
	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  0, IPPROTO_TCP, 0));

			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)

	DP(NETIF_MSG_TX_QUEUED,
	   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
	   " tcp_flags %x xsum %x seq %u hlen %u\n",
	   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
	   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
	   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);

	return NETDEV_TX_OK;
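
/* Two ordering points above are worth spelling out.  First, the BD
 * count must reach the chip before the packet count, hence the full
 * barrier between the two producer writes (a firmware rule, per the
 * comment).  Second, the stop/wake sequence re-checks availability
 * after stopping the queue: a TX completion may free BDs between the
 * test and the stop, and without the re-check the queue could stall
 * forever.  A hedged sketch of just that race guard (example_stop()
 * is illustrative, not driver code):
 */
#if 0
static void example_stop(struct net_device *dev, struct bnx2x_fastpath *fp)
{
	if (bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3) {
		netif_stop_queue(dev);
		/* a completion may have run in between - undo the stop */
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
}
#endif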
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);
/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			old = config->hdr.length_6b;

				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
						i--; /* already invalidated */

					CAM_INVALIDATE(config->

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),

			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
				   "%02x:%02x:%02x:%02x:%02x:%02x\n",
				   mclist->dmi_addr[0], mclist->dmi_addr[1],
				   mclist->dmi_addr[2], mclist->dmi_addr[3],
				   mclist->dmi_addr[4], mclist->dmi_addr[5]);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				mc_filter[regidx] |= (1 << bit);

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
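
/* How an address lands in the E1H 256-bit hash filter above: bits
 * 31..24 of the little-endian CRC32c of the MAC pick one of 256 bit
 * positions spread across MC_HASH_SIZE 32-bit registers (the register
 * index and in-register bit are derived from "bit" on lines elided
 * above).  A hedged sketch of the whole derivation (example_hash_set()
 * is illustrative, not driver code):
 */
#if 0
static void example_hash_set(u32 *mc_filter, const u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* 0..255 */

	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
#endif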
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
			bnx2x_set_mac_addr_e1(bp);
			bnx2x_set_mac_addr_e1h(bp);
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);

		data->phy_id = bp->port.phy_addr;

		if (!netif_running(dev))

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);

		if (!capable(CAP_NET_ADMIN))

		if (!netif_running(dev))

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		goto err_out_disable;

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		goto err_out_disable;

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			goto err_out_disable;

		pci_set_master(pdev);
		pci_save_state(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		goto err_out_release;

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		goto err_out_release;

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			goto err_out_release;

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		goto err_out_release;

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		goto err_out_release;

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");

	bnx2x_set_power_state(bp, PCI_D0);
	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	iounmap(bp->regview);

	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
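
/* Both helpers read the PCIe Link Control/Status dword through the
 * device's register window and extract one field; in the probe banner
 * below, the width prints as "x<N>" and a speed code of 1 or 2 maps to
 * 2.5GHz (Gen1) or 5GHz (Gen2).  A sketch of the speed mapping only
 * (example_speed_str() is illustrative, not driver code):
 */
#if 0
static const char *example_speed_str(int pcie_speed)
{
	return (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz";
}
#endif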
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
		printk(KERN_ERR PFX "Cannot allocate net device\n");

	netif_carrier_off(dev);

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);

	rc = register_netdev(dev);
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
		unregister_netdev(dev);
		goto init_one_exit;

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));

	iounmap(bp->regview);

	iounmap(bp->doorbells);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");

	bp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(bp->regview);

	iounmap(bp->doorbells);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");

	bp = netdev_priv(dev);

	pci_save_state(pdev);

	if (!netif_running(dev)) {

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");

	bp = netdev_priv(dev);

	pci_restore_state(pdev);

	if (!netif_running(dev)) {

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_NORMAL);
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	pci_disable_device(pdev);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	return PCI_ERS_RESULT_RECOVERED;
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_OPEN);

	netif_device_attach(dev);
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);