1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
60 #define DRV_MODULE_VERSION "1.45.23"
61 #define DRV_MODULE_RELDATE "2008/11/03"
62 #define BNX2X_BC_VER 0x040200
64 /* Time in jiffies before concluding the transmitter is hung */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71 MODULE_AUTHOR("Eliezer Tamir");
72 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
73 MODULE_LICENSE("GPL");
74 MODULE_VERSION(DRV_MODULE_VERSION);
76 static int disable_tpa;
80 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83 module_param(disable_tpa, int, 0);
84 module_param(use_inta, int, 0);
85 module_param(poll, int, 0);
86 module_param(debug, int, 0);
87 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
88 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
89 MODULE_PARM_DESC(poll, "use polling (for debug)");
90 MODULE_PARM_DESC(debug, "default debug msglevel");
93 module_param(use_multi, int, 0);
94 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
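/* Example (illustrative, not part of the driver itself): the parameters
 * above are set at module load time, e.g.
 *	modprobe bnx2x disable_tpa=1 use_inta=1
 * where debug takes a NETIF_MSG_* / BNX2X_MSG_* bitmask.
 */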
96 static struct workqueue_struct *bnx2x_wq;
98 enum bnx2x_board_type {
104 /* indexed by board_type, above */
107 } board_info[] __devinitdata = {
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
114 static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
124 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
126 /****************************************************************************
127 * General service functions
128 ****************************************************************************/
131 * locking is done by MCP
133 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
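/* Usage sketch (illustrative): the pair above tunnels GRC accesses
 * through the PCI config window, e.g.
 *
 *	bnx2x_reg_wr_ind(bp, addr, val);
 *	val = bnx2x_reg_rd_ind(bp, addr);
 *
 * restoring PCICFG_VENDOR_ID_OFFSET afterwards parks the window on a
 * harmless register.
 */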
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
177 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 struct dmae_command *dmae = &bp->init_dmae;
181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
193 mutex_lock(&bp->dmae_mutex);
195 memset(dmae, 0, sizeof(struct dmae_command));
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
203 DMAE_CMD_ENDIANITY_DW_SWAP |
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
214 dmae->comp_val = DMAE_COMP_VAL;
216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237 BNX2X_ERR("dmae timeout!\n");
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
248 mutex_unlock(&bp->dmae_mutex);
251 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
253 struct dmae_command *dmae = &bp->init_dmae;
254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268 mutex_lock(&bp->dmae_mutex);
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
279 DMAE_CMD_ENDIANITY_DW_SWAP |
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
290 dmae->comp_val = DMAE_COMP_VAL;
292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
306 while (*wb_comp != DMAE_COMP_VAL) {
309 BNX2X_ERR("dmae timeout!\n");
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
323 mutex_unlock(&bp->dmae_mutex);
326 /* used only for slowpath so not inlined */
327 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
337 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
341 REG_RD_DMAE(bp, reg, wb_data, 2);
343 return HILO_U64(wb_data[0], wb_data[1]);
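/* Usage sketch (illustrative): a 64-bit wide-bus register is accessed as
 * two 32-bit halves through the DMAE helpers above, e.g.
 *
 *	bnx2x_wb_wr(bp, reg, U64_HI(val), U64_LO(val));
 *	val = bnx2x_wb_rd(bp, reg);
 */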
347 static int bnx2x_mc_assert(struct bnx2x *bp)
351 u32 row0, row1, row2, row3;
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
468 static void bnx2x_fw_dump(struct bnx2x *bp)
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 printk(KERN_CONT "%s", (char *)data);
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 printk(KERN_CONT "%s", (char *)data);
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
495 static void bnx2x_panic_dump(struct bnx2x *bp)
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
503 BNX2X_ERR("begin crash dump -----------------\n");
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
578 " spq_prod_idx(%u)\n",
579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
584 BNX2X_ERR("end crash dump -----------------\n");
587 static void bnx2x_int_enable(struct bnx2x *bp)
589 int port = BP_PORT(bp);
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
596 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
597 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
600 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
601 HC_CONFIG_0_REG_INT_LINE_EN_0 |
602 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
604 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
605 val, port, addr, msix);
607 REG_WR(bp, addr, val);
609 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
613 val, port, addr, msix);
615 REG_WR(bp, addr, val);
617 if (CHIP_IS_E1H(bp)) {
618 /* init leading/trailing edge */
620 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622 /* enable nig attention */
627 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
628 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
632 static void bnx2x_int_disable(struct bnx2x *bp)
634 int port = BP_PORT(bp);
635 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
636 u32 val = REG_RD(bp, addr);
638 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0 |
641 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
646 REG_WR(bp, addr, val);
647 if (REG_RD(bp, addr) != val)
648 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
653 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
656 /* disable interrupt handling */
657 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp);
662 /* make sure all ISRs are done */
664 for_each_queue(bp, i)
665 synchronize_irq(bp->msix_table[i].vector);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp->msix_table[i].vector);
670 synchronize_irq(bp->pdev->irq);
672 /* make sure sp_task is not running */
673 cancel_delayed_work(&bp->sp_task);
674 flush_workqueue(bnx2x_wq);
680 * General service functions
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
684 u8 storm, u16 index, u8 op, u8 update)
686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack;
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
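/* Typical use (illustrative): a NAPI poll routine re-enables the IGU line
 * once the ring has been serviced, along the lines of
 *
 *	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
 *		     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */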
702 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
704 struct host_status_block *fpsb = fp->status_blk;
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
719 static u16 bnx2x_ack_int(struct bnx2x *bp)
721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
733 * fast path service functions
736 /* free skb in the packet ring at pos idx
737 * return idx of last bd freed
739 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
742 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
743 struct eth_tx_bd *tx_bd;
744 struct sk_buff *skb = tx_buf->skb;
745 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
748 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
752 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
753 tx_bd = &fp->tx_desc_ring[bd_idx];
754 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
755 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
757 nbd = le16_to_cpu(tx_bd->nbd) - 1;
758 new_cons = nbd + tx_buf->first_bd;
759 #ifdef BNX2X_STOP_ON_ERROR
760 if (nbd > (MAX_SKB_FRAGS + 2)) {
761 BNX2X_ERR("BAD nbd!\n");
766 /* Skip a parse bd and the TSO split header bd
767 since they have no mapping */
769 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
771 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
772 ETH_TX_BD_FLAGS_TCP_CSUM |
773 ETH_TX_BD_FLAGS_SW_LSO)) {
775 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
776 tx_bd = &fp->tx_desc_ring[bd_idx];
777 /* is this a TSO split header bd? */
778 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
780 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
787 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
788 tx_bd = &fp->tx_desc_ring[bd_idx];
789 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
790 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
792 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
798 tx_buf->first_bd = 0;
804 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
810 barrier(); /* Tell compiler that prod and cons can change */
811 prod = fp->tx_bd_prod;
812 cons = fp->tx_bd_cons;
814 /* NUM_TX_RINGS = number of "next-page" entries;
815 it will be used as a threshold */
816 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
818 #ifdef BNX2X_STOP_ON_ERROR
820 WARN_ON(used > fp->bp->tx_ring_size);
821 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
824 return (s16)(fp->bp->tx_ring_size) - used;
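/* Worked example (illustrative, assuming NUM_TX_RINGS == 2): with
 * prod == 100 and cons == 90, used = 10 + 2 = 12, so 12 descriptors are
 * accounted as busy; the "next-page" BDs can never carry data, hence they
 * are always treated as used.
 */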
827 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
829 struct bnx2x *bp = fp->bp;
830 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
833 #ifdef BNX2X_STOP_ON_ERROR
834 if (unlikely(bp->panic))
838 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
839 sw_cons = fp->tx_pkt_cons;
841 while (sw_cons != hw_cons) {
844 pkt_cons = TX_BD(sw_cons);
846 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
848 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
849 hw_cons, sw_cons, pkt_cons);
851 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
853 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
856 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
864 fp->tx_pkt_cons = sw_cons;
865 fp->tx_bd_cons = bd_cons;
867 /* Need to make the tx_cons update visible to start_xmit()
868 * before checking for netif_queue_stopped(). Without the
869 * memory barrier, there is a small possibility that start_xmit()
870 * will miss it and cause the queue to be stopped forever.
874 /* TBD need a thresh? */
875 if (unlikely(netif_queue_stopped(bp->dev))) {
877 netif_tx_lock(bp->dev);
879 if (netif_queue_stopped(bp->dev) &&
880 (bp->state == BNX2X_STATE_OPEN) &&
881 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
882 netif_wake_queue(bp->dev);
884 netif_tx_unlock(bp->dev);
889 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
890 union eth_rx_cqe *rr_cqe)
892 struct bnx2x *bp = fp->bp;
893 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
894 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
897 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
898 FP_IDX(fp), cid, command, bp->state,
899 rr_cqe->ramrod_cqe.ramrod_type);
904 switch (command | fp->state) {
905 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
906 BNX2X_FP_STATE_OPENING):
907 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
909 fp->state = BNX2X_FP_STATE_OPEN;
912 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
913 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
915 fp->state = BNX2X_FP_STATE_HALTED;
919 BNX2X_ERR("unexpected MC reply (%d) "
920 "fp->state is %x\n", command, fp->state);
923 mb(); /* force bnx2x_wait_ramrod() to see the change */
927 switch (command | bp->state) {
928 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
929 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
930 bp->state = BNX2X_STATE_OPEN;
933 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
934 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
935 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
936 fp->state = BNX2X_FP_STATE_HALTED;
939 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
940 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
941 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
945 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
946 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
947 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
948 bp->set_mac_pending = 0;
951 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
952 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
956 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
960 mb(); /* force bnx2x_wait_ramrod() to see the change */
963 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
964 struct bnx2x_fastpath *fp, u16 index)
966 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
967 struct page *page = sw_buf->page;
968 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
970 /* Skip "next page" elements */
974 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
975 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
976 __free_pages(page, PAGES_PER_SGE_SHIFT);
983 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
984 struct bnx2x_fastpath *fp, int last)
988 for (i = 0; i < last; i++)
989 bnx2x_free_rx_sge(bp, fp, i);
992 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
995 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
996 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1000 if (unlikely(page == NULL))
1003 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1004 PCI_DMA_FROMDEVICE);
1005 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1006 __free_pages(page, PAGES_PER_SGE_SHIFT);
1010 sw_buf->page = page;
1011 pci_unmap_addr_set(sw_buf, mapping, mapping);
1013 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1014 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
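/* Note (illustrative numbers): each SGE maps a compound page of
 * PAGES_PER_SGE pages; if PAGES_PER_SGE_SHIFT were 2 on a 4K-page system,
 * one SGE would cover a 16K buffer. On allocation or mapping failure the
 * ring entry is left untouched and the caller stops refilling.
 */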
1019 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1020 struct bnx2x_fastpath *fp, u16 index)
1022 struct sk_buff *skb;
1023 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1024 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1027 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1028 if (unlikely(skb == NULL))
1031 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1032 PCI_DMA_FROMDEVICE);
1033 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1039 pci_unmap_addr_set(rx_buf, mapping, mapping);
1041 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1042 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1047 /* note that we are not allocating a new skb here:
1048 * we are just moving one from cons to prod.
1049 * We are not creating a new mapping,
1050 * so there is no need to check for dma_mapping_error().
1052 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1053 struct sk_buff *skb, u16 cons, u16 prod)
1055 struct bnx2x *bp = fp->bp;
1056 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1057 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1058 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1059 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1061 pci_dma_sync_single_for_device(bp->pdev,
1062 pci_unmap_addr(cons_rx_buf, mapping),
1063 bp->rx_offset + RX_COPY_THRESH,
1064 PCI_DMA_FROMDEVICE);
1066 prod_rx_buf->skb = cons_rx_buf->skb;
1067 pci_unmap_addr_set(prod_rx_buf, mapping,
1068 pci_unmap_addr(cons_rx_buf, mapping));
1069 *prod_bd = *cons_bd;
1072 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1075 u16 last_max = fp->last_max_sge;
1077 if (SUB_S16(idx, last_max) > 0)
1078 fp->last_max_sge = idx;
1081 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1085 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1086 int idx = RX_SGE_CNT * i - 1;
1088 for (j = 0; j < 2; j++) {
1089 SGE_MASK_CLEAR_BIT(fp, idx);
1095 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1096 struct eth_fast_path_rx_cqe *fp_cqe)
1098 struct bnx2x *bp = fp->bp;
1099 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1100 le16_to_cpu(fp_cqe->len_on_bd)) >>
1102 u16 last_max, last_elem, first_elem;
1109 /* First mark all used pages */
1110 for (i = 0; i < sge_len; i++)
1111 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1113 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1114 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1116 /* Here we assume that the last SGE index is the biggest */
1117 prefetch((void *)(fp->sge_mask));
1118 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1120 last_max = RX_SGE(fp->last_max_sge);
1121 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1122 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1124 /* If ring is not full */
1125 if (last_elem + 1 != first_elem)
1128 /* Now update the prod */
1129 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1130 if (likely(fp->sge_mask[i]))
1133 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1134 delta += RX_SGE_MASK_ELEM_SZ;
1138 fp->rx_sge_prod += delta;
1139 /* clear page-end entries */
1140 bnx2x_clear_sge_mask_next_elems(fp);
1143 DP(NETIF_MSG_RX_STATUS,
1144 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1145 fp->last_max_sge, fp->rx_sge_prod);
1148 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1150 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1151 memset(fp->sge_mask, 0xff,
1152 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1154 /* Clear the two last indices in the page to 1:
1155 these are the indices that correspond to the "next" element,
1156 hence will never be indicated and should be removed from
1157 the calculations. */
1158 bnx2x_clear_sge_mask_next_elems(fp);
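/* Worked example (illustrative, assuming 64-bit mask elements, i.e.
 * RX_SGE_MASK_ELEM_SHIFT == 6): SGE index 130 lives in sge_mask[2],
 * bit 2. A set bit means "free"; bnx2x_update_sge_prod() advances
 * rx_sge_prod only across elements that collapsed to 0 (fully consumed)
 * and re-arms them to all ones.
 */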
1161 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1162 struct sk_buff *skb, u16 cons, u16 prod)
1164 struct bnx2x *bp = fp->bp;
1165 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1166 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1167 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1170 /* move empty skb from pool to prod and map it */
1171 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1172 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1173 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1174 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1176 /* move partial skb from cons to pool (don't unmap yet) */
1177 fp->tpa_pool[queue] = *cons_rx_buf;
1179 /* mark bin state as start - print error if current state != stop */
1180 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1181 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1183 fp->tpa_state[queue] = BNX2X_TPA_START;
1185 /* point prod_bd to new skb */
1186 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1187 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1189 #ifdef BNX2X_STOP_ON_ERROR
1190 fp->tpa_queue_used |= (1 << queue);
1191 #ifdef __powerpc64__
1192 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1194 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1196 fp->tpa_queue_used);
1200 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1201 struct sk_buff *skb,
1202 struct eth_fast_path_rx_cqe *fp_cqe,
1205 struct sw_rx_page *rx_pg, old_rx_pg;
1206 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1207 u32 i, frag_len, frag_size, pages;
1211 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1212 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1214 /* This is needed in order to enable forwarding support */
1216 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1217 max(frag_size, (u32)len_on_bd));
1219 #ifdef BNX2X_STOP_ON_ERROR
1221 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1222 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1224 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1225 fp_cqe->pkt_len, len_on_bd);
1231 /* Run through the SGL and compose the fragmented skb */
1232 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1233 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1235 /* FW gives the indices of the SGE as if the ring is an array
1236 (meaning that "next" element will consume 2 indices) */
1237 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1238 rx_pg = &fp->rx_page_ring[sge_idx];
1241 /* If we fail to allocate a substitute page, we simply stop
1242 where we are and drop the whole packet */
1243 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1244 if (unlikely(err)) {
1245 bp->eth_stats.rx_skb_alloc_failed++;
1249 /* Unmap the page as we are going to pass it to the stack */
1250 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1251 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1253 /* Add one frag and update the appropriate fields in the skb */
1254 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1256 skb->data_len += frag_len;
1257 skb->truesize += frag_len;
1258 skb->len += frag_len;
1260 frag_size -= frag_len;
1266 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1270 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1271 struct sk_buff *skb = rx_buf->skb;
1273 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1275 /* Unmap skb in the pool anyway, as we are going to change
1276 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */
1278 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1279 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1281 if (likely(new_skb)) {
1282 /* fix ip xsum and give it to the stack */
1283 /* (no need to map the new skb) */
1286 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1287 PARSING_FLAGS_VLAN);
1288 int is_not_hwaccel_vlan_cqe =
1289 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1293 prefetch(((char *)(skb)) + 128);
1295 #ifdef BNX2X_STOP_ON_ERROR
1296 if (pad + len > bp->rx_buf_size) {
1297 BNX2X_ERR("skb_put is about to fail... "
1298 "pad %d len %d rx_buf_size %d\n",
1299 pad, len, bp->rx_buf_size);
1305 skb_reserve(skb, pad);
1308 skb->protocol = eth_type_trans(skb, bp->dev);
1309 skb->ip_summed = CHECKSUM_UNNECESSARY;
1314 iph = (struct iphdr *)skb->data;
1316 /* If there is no Rx VLAN offloading -
1317 take the VLAN tag into account */
1318 if (unlikely(is_not_hwaccel_vlan_cqe))
1319 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1322 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1325 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1326 &cqe->fast_path_cqe, cqe_idx)) {
1328 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1329 (!is_not_hwaccel_vlan_cqe))
1330 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1331 le16_to_cpu(cqe->fast_path_cqe.
1335 netif_receive_skb(skb);
1337 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1338 " - dropping packet!\n");
1343 /* put new skb in bin */
1344 fp->tpa_pool[queue].skb = new_skb;
1347 /* else drop the packet and keep the buffer in the bin */
1348 DP(NETIF_MSG_RX_STATUS,
1349 "Failed to allocate new skb - dropping packet!\n");
1350 bp->eth_stats.rx_skb_alloc_failed++;
1353 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1356 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1357 struct bnx2x_fastpath *fp,
1358 u16 bd_prod, u16 rx_comp_prod,
1361 struct tstorm_eth_rx_producers rx_prods = {0};
1364 /* Update producers */
1365 rx_prods.bd_prod = bd_prod;
1366 rx_prods.cqe_prod = rx_comp_prod;
1367 rx_prods.sge_prod = rx_sge_prod;
1370 * Make sure that the BD and SGE data is updated before updating the
1371 * producers since FW might read the BD/SGE right after the producer
1373 * This is only applicable for weak-ordered memory model archs such
1374 * as IA-64. The following barrier is also mandatory since FW
1375 * assumes BDs must have buffers.
1379 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1380 REG_WR(bp, BAR_TSTRORM_INTMEM +
1381 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1382 ((u32 *)&rx_prods)[i]);
1384 mmiowb(); /* keep prod updates ordered */
1386 DP(NETIF_MSG_RX_STATUS,
1387 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1388 bd_prod, rx_comp_prod, rx_sge_prod);
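/* Usage sketch (illustrative): a receive path ends its refill cycle with
 * something like
 *
 *	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 *			     fp->rx_sge_prod);
 *
 * pushing all three producers to the TSTORM in one shot.
 */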
1391 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1393 struct bnx2x *bp = fp->bp;
1394 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1395 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1398 #ifdef BNX2X_STOP_ON_ERROR
1399 if (unlikely(bp->panic))
1403 /* CQ "next element" is of the size of the regular element,
1404 that's why it's ok here */
1405 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1406 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1409 bd_cons = fp->rx_bd_cons;
1410 bd_prod = fp->rx_bd_prod;
1411 bd_prod_fw = bd_prod;
1412 sw_comp_cons = fp->rx_comp_cons;
1413 sw_comp_prod = fp->rx_comp_prod;
1415 /* Memory barrier necessary as speculative reads of the rx
1416 * buffer can be ahead of the index in the status block
1420 DP(NETIF_MSG_RX_STATUS,
1421 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1422 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1424 while (sw_comp_cons != hw_comp_cons) {
1425 struct sw_rx_bd *rx_buf = NULL;
1426 struct sk_buff *skb;
1427 union eth_rx_cqe *cqe;
1431 comp_ring_cons = RCQ_BD(sw_comp_cons);
1432 bd_prod = RX_BD(bd_prod);
1433 bd_cons = RX_BD(bd_cons);
1435 cqe = &fp->rx_comp_ring[comp_ring_cons];
1436 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1438 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1439 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1440 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1441 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1442 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1443 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1445 /* is this a slowpath msg? */
1446 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1447 bnx2x_sp_event(fp, cqe);
1450 /* this is an rx packet */
1452 rx_buf = &fp->rx_buf_ring[bd_cons];
1454 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1455 pad = cqe->fast_path_cqe.placement_offset;
1457 /* If CQE is marked both TPA_START and TPA_END
1458 it is a non-TPA CQE */
1459 if ((!fp->disable_tpa) &&
1460 (TPA_TYPE(cqe_fp_flags) !=
1461 (TPA_TYPE_START | TPA_TYPE_END))) {
1462 u16 queue = cqe->fast_path_cqe.queue_index;
1464 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1465 DP(NETIF_MSG_RX_STATUS,
1466 "calling tpa_start on queue %d\n",
1469 bnx2x_tpa_start(fp, queue, skb,
1474 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1475 DP(NETIF_MSG_RX_STATUS,
1476 "calling tpa_stop on queue %d\n",
1479 if (!BNX2X_RX_SUM_FIX(cqe))
1480 BNX2X_ERR("STOP on none TCP "
1483 /* This is a size of the linear data
1485 len = le16_to_cpu(cqe->fast_path_cqe.
1487 bnx2x_tpa_stop(bp, fp, queue, pad,
1488 len, cqe, comp_ring_cons);
1489 #ifdef BNX2X_STOP_ON_ERROR
1494 bnx2x_update_sge_prod(fp,
1495 &cqe->fast_path_cqe);
1500 pci_dma_sync_single_for_device(bp->pdev,
1501 pci_unmap_addr(rx_buf, mapping),
1502 pad + RX_COPY_THRESH,
1503 PCI_DMA_FROMDEVICE);
1505 prefetch(((char *)(skb)) + 128);
1507 /* is this an error packet? */
1508 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1509 DP(NETIF_MSG_RX_ERR,
1510 "ERROR flags %x rx packet %u\n",
1511 cqe_fp_flags, sw_comp_cons);
1512 bp->eth_stats.rx_err_discard_pkt++;
1516 /* Since we don't have a jumbo ring,
1517 * copy small packets if mtu > 1500
1519 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1520 (len <= RX_COPY_THRESH)) {
1521 struct sk_buff *new_skb;
1523 new_skb = netdev_alloc_skb(bp->dev,
1525 if (new_skb == NULL) {
1526 DP(NETIF_MSG_RX_ERR,
1527 "ERROR packet dropped "
1528 "because of alloc failure\n");
1529 bp->eth_stats.rx_skb_alloc_failed++;
1534 skb_copy_from_linear_data_offset(skb, pad,
1535 new_skb->data + pad, len);
1536 skb_reserve(new_skb, pad);
1537 skb_put(new_skb, len);
1539 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1543 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1544 pci_unmap_single(bp->pdev,
1545 pci_unmap_addr(rx_buf, mapping),
1547 PCI_DMA_FROMDEVICE);
1548 skb_reserve(skb, pad);
1552 DP(NETIF_MSG_RX_ERR,
1553 "ERROR packet dropped because "
1554 "of alloc failure\n");
1555 bp->eth_stats.rx_skb_alloc_failed++;
1557 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1561 skb->protocol = eth_type_trans(skb, bp->dev);
1563 skb->ip_summed = CHECKSUM_NONE;
1565 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1566 skb->ip_summed = CHECKSUM_UNNECESSARY;
1568 bp->eth_stats.hw_csum_err++;
1573 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1574 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1575 PARSING_FLAGS_VLAN))
1576 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1577 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1580 netif_receive_skb(skb);
1586 bd_cons = NEXT_RX_IDX(bd_cons);
1587 bd_prod = NEXT_RX_IDX(bd_prod);
1588 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1591 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1592 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1594 if (rx_pkt == budget)
1598 fp->rx_bd_cons = bd_cons;
1599 fp->rx_bd_prod = bd_prod_fw;
1600 fp->rx_comp_cons = sw_comp_cons;
1601 fp->rx_comp_prod = sw_comp_prod;
1603 /* Update producers */
1604 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1607 fp->rx_pkt += rx_pkt;
1613 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1615 struct bnx2x_fastpath *fp = fp_cookie;
1616 struct bnx2x *bp = fp->bp;
1617 int index = FP_IDX(fp);
1619 /* Return here if interrupt is disabled */
1620 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1621 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1625 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1626 index, FP_SB_ID(fp));
1627 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1629 #ifdef BNX2X_STOP_ON_ERROR
1630 if (unlikely(bp->panic))
1634 prefetch(fp->rx_cons_sb);
1635 prefetch(fp->tx_cons_sb);
1636 prefetch(&fp->status_blk->c_status_block.status_block_index);
1637 prefetch(&fp->status_blk->u_status_block.status_block_index);
1639 netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1644 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1646 struct net_device *dev = dev_instance;
1647 struct bnx2x *bp = netdev_priv(dev);
1648 u16 status = bnx2x_ack_int(bp);
1651 /* Return here if interrupt is shared and it's not for us */
1652 if (unlikely(status == 0)) {
1653 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1656 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1658 /* Return here if interrupt is disabled */
1659 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1660 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1664 #ifdef BNX2X_STOP_ON_ERROR
1665 if (unlikely(bp->panic))
1669 mask = 0x2 << bp->fp[0].sb_id;
1670 if (status & mask) {
1671 struct bnx2x_fastpath *fp = &bp->fp[0];
1673 prefetch(fp->rx_cons_sb);
1674 prefetch(fp->tx_cons_sb);
1675 prefetch(&fp->status_blk->c_status_block.status_block_index);
1676 prefetch(&fp->status_blk->u_status_block.status_block_index);
1678 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1684 if (unlikely(status & 0x1)) {
1685 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1693 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1699 /* end of fast path */
1701 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1706 * General service functions
1709 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1712 u32 resource_bit = (1 << resource);
1713 int func = BP_FUNC(bp);
1714 u32 hw_lock_control_reg;
1717 /* Validating that the resource is within range */
1718 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1720 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1721 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1726 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1728 hw_lock_control_reg =
1729 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1732 /* Validating that the resource is not already taken */
1733 lock_status = REG_RD(bp, hw_lock_control_reg);
1734 if (lock_status & resource_bit) {
1735 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1736 lock_status, resource_bit);
1740 /* Try for 5 seconds, every 5ms */
1741 for (cnt = 0; cnt < 1000; cnt++) {
1742 /* Try to acquire the lock */
1743 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1744 lock_status = REG_RD(bp, hw_lock_control_reg);
1745 if (lock_status & resource_bit)
1750 DP(NETIF_MSG_HW, "Timeout\n");
1754 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1757 u32 resource_bit = (1 << resource);
1758 int func = BP_FUNC(bp);
1759 u32 hw_lock_control_reg;
1761 /* Validating that the resource is within range */
1762 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1764 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1765 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1770 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1772 hw_lock_control_reg =
1773 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1776 /* Validating that the resource is currently taken */
1777 lock_status = REG_RD(bp, hw_lock_control_reg);
1778 if (!(lock_status & resource_bit)) {
1779 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1780 lock_status, resource_bit);
1784 REG_WR(bp, hw_lock_control_reg, resource_bit);
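/* Usage sketch (illustrative): the lock pair brackets any access to a
 * resource shared between PCI functions, e.g.
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... program MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * as bnx2x_set_gpio() below does.
 */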
1788 /* HW Lock for shared dual port PHYs */
1789 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1791 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1793 mutex_lock(&bp->port.phy_mutex);
1795 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1796 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1797 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1800 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1804 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1805 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1806 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1808 mutex_unlock(&bp->port.phy_mutex);
1811 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1813 /* The GPIO should be swapped if swap register is set and active */
1814 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1815 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1816 int gpio_shift = gpio_num +
1817 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1818 u32 gpio_mask = (1 << gpio_shift);
1821 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1822 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1827 /* read GPIO and mask off all but the float bits */
1828 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1831 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1832 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1833 gpio_num, gpio_shift);
1834 /* clear FLOAT and set CLR */
1835 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1836 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1839 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1840 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1841 gpio_num, gpio_shift);
1842 /* clear FLOAT and set SET */
1843 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1844 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1847 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1848 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1849 gpio_num, gpio_shift);
1851 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1858 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1859 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1864 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1866 u32 spio_mask = (1 << spio_num);
1869 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1870 (spio_num > MISC_REGISTERS_SPIO_7)) {
1871 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1876 /* read SPIO and mask off all but the float bits */
1877 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1880 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1881 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1882 /* clear FLOAT and set CLR */
1883 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1884 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1887 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1888 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1889 /* clear FLOAT and set SET */
1890 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1891 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1894 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1895 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1897 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1904 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1905 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
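/* Example call (illustrative values): driving GPIO 1 of the (possibly
 * swapped) port high clears its FLOAT bit and sets its SET bit:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 */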
1910 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1912 switch (bp->link_vars.ieee_fc &
1913 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1914 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1915 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1918 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1919 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1922 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1923 bp->port.advertising |= ADVERTISED_Asym_Pause;
1926 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1932 static void bnx2x_link_report(struct bnx2x *bp)
1934 if (bp->link_vars.link_up) {
1935 if (bp->state == BNX2X_STATE_OPEN)
1936 netif_carrier_on(bp->dev);
1937 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1939 printk("%d Mbps ", bp->link_vars.line_speed);
1941 if (bp->link_vars.duplex == DUPLEX_FULL)
1942 printk("full duplex");
1944 printk("half duplex");
1946 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1947 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1948 printk(", receive ");
1949 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1950 printk("& transmit ");
1952 printk(", transmit ");
1954 printk("flow control ON");
1958 } else { /* link_down */
1959 netif_carrier_off(bp->dev);
1960 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1964 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1966 if (!BP_NOMCP(bp)) {
1969 /* Initialize link parameters structure variables */
1970 /* It is recommended to turn off RX FC for jumbo frames
1971 for better performance */
1973 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1974 else if (bp->dev->mtu > 5000)
1975 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1977 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1979 bnx2x_acquire_phy_lock(bp);
1980 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1981 bnx2x_release_phy_lock(bp);
1983 bnx2x_calc_fc_adv(bp);
1985 if (bp->link_vars.link_up)
1986 bnx2x_link_report(bp);
1991 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1995 static void bnx2x_link_set(struct bnx2x *bp)
1997 if (!BP_NOMCP(bp)) {
1998 bnx2x_acquire_phy_lock(bp);
1999 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000 bnx2x_release_phy_lock(bp);
2002 bnx2x_calc_fc_adv(bp);
2004 BNX2X_ERR("Bootcode is missing -not setting link\n");
2007 static void bnx2x__link_reset(struct bnx2x *bp)
2009 if (!BP_NOMCP(bp)) {
2010 bnx2x_acquire_phy_lock(bp);
2011 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2012 bnx2x_release_phy_lock(bp);
2014 BNX2X_ERR("Bootcode is missing -not resetting link\n");
2017 static u8 bnx2x_link_test(struct bnx2x *bp)
2021 bnx2x_acquire_phy_lock(bp);
2022 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2023 bnx2x_release_phy_lock(bp);
2028 /* Calculates the sum of vn_min_rates.
2029 It's needed for further normalizing of the min_rates.
2034 0 - if all the min_rates are 0.
2035 In the latter case, the fairness algorithm should be deactivated.
2036 If not all min_rates are zero then those that are zeroes will
2039 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2041 int i, port = BP_PORT(bp);
2045 for (i = 0; i < E1HVN_MAX; i++) {
2047 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2048 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2049 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2050 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2051 /* If min rate is zero - set it to 1 */
2053 vn_min_rate = DEF_MIN_RATE;
2057 wsum += vn_min_rate;
2061 /* ... only if all min rates are zeros - disable FAIRNESS */
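/* Worked example (illustrative): with configured min BW fields of 10, 0,
 * 30 and 0 (i.e. vn_min_rates of 1000, 0, 3000 and 0), the zero entries
 * are bumped to DEF_MIN_RATE before summing; only when every visible vn
 * reports 0 does the function return 0 and fairness is disabled.
 */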
2068 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2071 struct cmng_struct_per_port *m_cmng_port)
2073 u32 r_param = port_rate / 8;
2074 int port = BP_PORT(bp);
2077 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2079 /* Enable minmax only if we are in e1hmf mode */
2081 u32 fair_periodic_timeout_usec;
2084 /* Enable rate shaping and fairness */
2085 m_cmng_port->flags.cmng_vn_enable = 1;
2086 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2087 m_cmng_port->flags.rate_shaping_enable = 1;
2090 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2091 " fairness will be disabled\n");
2093 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2094 m_cmng_port->rs_vars.rs_periodic_timeout =
2095 RS_PERIODIC_TIMEOUT_USEC / 4;
2097 /* this is the threshold below which no timer arming will occur;
2098 the 1.25 coefficient makes the threshold a little bigger
2099 than the real time, to compensate for timer inaccuracy */
2100 m_cmng_port->rs_vars.rs_threshold =
2101 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2103 /* resolution of fairness timer */
2104 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2105 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2106 t_fair = T_FAIR_COEF / port_rate;
2108 /* this is the threshold below which we won't arm
2109 the timer anymore */
2110 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2112 /* we multiply by 1e3/8 to get bytes/msec.
2113 We don't want the credits to exceed
2114 T_FAIR*FAIR_MEM (the algorithm resolution) */
2115 m_cmng_port->fair_vars.upper_bound =
2116 r_param * t_fair * FAIR_MEM;
2117 /* since each tick is 4 usec */
2118 m_cmng_port->fair_vars.fairness_timeout =
2119 fair_periodic_timeout_usec / 4;
2122 /* Disable rate shaping and fairness */
2123 m_cmng_port->flags.cmng_vn_enable = 0;
2124 m_cmng_port->flags.fairness_enable = 0;
2125 m_cmng_port->flags.rate_shaping_enable = 0;
2128 "Single function mode minmax will be disabled\n");
2131 /* Store it to internal memory */
2132 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2133 REG_WR(bp, BAR_XSTRORM_INTMEM +
2134 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2135 ((u32 *)(m_cmng_port))[i]);
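/* Worked numbers (illustrative): at 10G line speed, port_rate == 10000
 * Mbps gives r_param = 1250 bytes/usec, so the fairness timer resolution
 * is QM_ARB_BYTES / 1250 usec and t_fair = T_FAIR_COEF / 10000, matching
 * the "for 10G it is 1000 usec" note above.
 */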
2138 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2139 u32 wsum, u16 port_rate,
2140 struct cmng_struct_per_port *m_cmng_port)
2142 struct rate_shaping_vars_per_vn m_rs_vn;
2143 struct fairness_vars_per_vn m_fair_vn;
2144 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2145 u16 vn_min_rate, vn_max_rate;
2148 /* If function is hidden - set min and max to zeroes */
2149 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2154 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2155 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2156 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2157 if current min rate is zero - set it to 1.
2158 This is a requirement of the algorithm. */
2159 if ((vn_min_rate == 0) && wsum)
2160 vn_min_rate = DEF_MIN_RATE;
2161 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2162 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2165 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2166 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2168 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2169 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2171 /* global vn counter - maximal Mbps for this vn */
2172 m_rs_vn.vn_counter.rate = vn_max_rate;
2174 /* quota - number of bytes transmitted in this period */
2175 m_rs_vn.vn_counter.quota =
2176 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2178 #ifdef BNX2X_PER_PROT_QOS
2179 /* per protocol counter */
2180 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2181 /* maximal Mbps for this protocol */
2182 m_rs_vn.protocol_counters[protocol].rate =
2183 protocol_max_rate[protocol];
2184 /* the quota in each timer period -
2185 number of bytes transmitted in this period */
2186 m_rs_vn.protocol_counters[protocol].quota =
2187 (u32)(rs_periodic_timeout_usec *
2189 protocol_counters[protocol].rate/8));
2194 /* credit for each period of the fairness algorithm:
2195 number of bytes in T_FAIR (the vns share the port rate).
2196 wsum should not be larger than 10000, thus
2197 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2198 m_fair_vn.vn_credit_delta =
2199 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2200 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2201 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2202 m_fair_vn.vn_credit_delta);
2205 #ifdef BNX2X_PER_PROT_QOS
2207 u32 protocolWeightSum = 0;
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2210 protocolWeightSum +=
2211 drvInit.protocol_min_rate[protocol];
2212 /* per protocol counter -
2213 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2214 if (protocolWeightSum > 0) {
2216 protocol < NUM_OF_PROTOCOLS; protocol++)
2217 /* credit for each period of the
2218 fairness algorithm - number of bytes in
2219 T_FAIR (the protocols share the vn rate) */
2220 m_fair_vn.protocol_credit_delta[protocol] =
2221 (u32)((vn_min_rate / 8) * t_fair *
2222 protocol_min_rate / protocolWeightSum);
2227 /* Store it to internal memory */
2228 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2229 REG_WR(bp, BAR_XSTRORM_INTMEM +
2230 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2231 ((u32 *)(&m_rs_vn))[i]);
2233 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2234 REG_WR(bp, BAR_XSTRORM_INTMEM +
2235 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2236 ((u32 *)(&m_fair_vn))[i]);
2239 /* This function is called upon link interrupt */
2240 static void bnx2x_link_attn(struct bnx2x *bp)
2244 /* Make sure that we are synced with the current statistics */
2245 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2247 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2249 if (bp->link_vars.link_up) {
2251 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2252 struct host_port_stats *pstats;
2254 pstats = bnx2x_sp(bp, port_stats);
2255 /* reset old bmac stats */
2256 memset(&(pstats->mac_stx[0]), 0,
2257 sizeof(struct mac_stx));
2259 if ((bp->state == BNX2X_STATE_OPEN) ||
2260 (bp->state == BNX2X_STATE_DISABLED))
2261 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2264 /* indicate link status */
2265 bnx2x_link_report(bp);
2270 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271 if (vn == BP_E1HVN(bp))
2272 continue;
2274 func = ((vn << 1) | BP_PORT(bp));
2276 /* Set the attention towards other drivers
2277 on the same port */
2278 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2279 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2283 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2284 struct cmng_struct_per_port m_cmng_port;
2286 int port = BP_PORT(bp);
2288 /* Init RATE SHAPING and FAIRNESS contexts */
2289 wsum = bnx2x_calc_vn_wsum(bp);
2290 bnx2x_init_port_minmax(bp, (int)wsum,
2291 bp->link_vars.line_speed,
2292 &m_cmng_port);
2294 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295 bnx2x_init_vn_minmax(bp, 2*vn + port,
2296 wsum, bp->link_vars.line_speed,
2297 &m_cmng_port);
2301 static void bnx2x__link_status_update(struct bnx2x *bp)
2303 if (bp->state != BNX2X_STATE_OPEN)
2304 return;
2306 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2308 if (bp->link_vars.link_up)
2309 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2310 else
2311 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2313 /* indicate link status */
2314 bnx2x_link_report(bp);
2317 static void bnx2x_pmf_update(struct bnx2x *bp)
2319 int port = BP_PORT(bp);
2323 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2325 /* enable nig attention */
2326 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2327 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2328 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2330 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2338 * General service functions
2341 /* the slow path queue is odd since completions arrive on the fastpath ring */
2342 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2343 u32 data_hi, u32 data_lo, int common)
2345 int func = BP_FUNC(bp);
2347 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2348 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2349 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2350 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2351 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2353 #ifdef BNX2X_STOP_ON_ERROR
2354 if (unlikely(bp->panic))
2355 return -EIO;
2356 #endif
2358 spin_lock_bh(&bp->spq_lock);
2360 if (!bp->spq_left) {
2361 BNX2X_ERR("BUG! SPQ ring full!\n");
2362 spin_unlock_bh(&bp->spq_lock);
2363 bnx2x_panic();
2364 return -EBUSY;
2365 }
2367 /* CID needs port number to be encoded in it */
2368 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2369 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2370 HW_CID(bp, cid)));
2371 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2372 if (common)
2373 bp->spq_prod_bd->hdr.type |=
2374 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2376 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2377 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2381 if (bp->spq_prod_bd == bp->spq_last_bd) {
2382 bp->spq_prod_bd = bp->spq;
2383 bp->spq_prod_idx = 0;
2384 DP(NETIF_MSG_TIMER, "end of spq\n");
2386 } else {
2387 bp->spq_prod_bd++;
2388 bp->spq_prod_idx++;
2389 }
2391 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2392 bp->spq_prod_idx);
2394 spin_unlock_bh(&bp->spq_lock);
2396 return 0;
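/* Typical usage, as in the statistics query later in this file: the
   64-bit ramrod payload is split into hi/lo dwords and 'common'
   selects the COMMON_RAMROD bit in the header type:

   rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
   ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0); */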
2398 /* acquire split MCP access lock register */
2399 static int bnx2x_acquire_alr(struct bnx2x *bp)
2406 for (j = 0; j < i*10; j++) {
2407 val = (1UL << 31);
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2410 if (val & (1L << 31))
2411 break;
2413 msleep(5);
2414 }
2415 if (!(val & (1L << 31))) {
2416 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2417 return -EBUSY;
2418 }
2420 return 0;
2423 /* release split MCP access lock register */
2424 static void bnx2x_release_alr(struct bnx2x *bp)
2428 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2431 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2432 {
2433 struct host_def_status_block *def_sb = bp->def_status_blk;
2434 u16 rc = 0;
2436 barrier(); /* status block is written to by the chip */
2437 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2438 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2439 rc |= 1;
2440 }
2441 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2442 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2443 rc |= 2;
2444 }
2445 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2446 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2447 rc |= 4;
2448 }
2449 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2450 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2451 rc |= 8;
2452 }
2453 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2454 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2455 rc |= 16;
2456 }
2458 return rc;
2459 }
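/* The returned mask feeds bnx2x_sp_task() below: bit 0 reports a new
   attention index and bit 1 a new CStorm index, matching the
   (status & 0x1) and (status & 0x2) tests there. */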
2461 * slow path service functions
2464 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2466 int port = BP_PORT(bp);
2467 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2468 COMMAND_REG_ATTN_BITS_SET);
2469 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2470 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2471 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2472 NIG_REG_MASK_INTERRUPT_PORT0;
2475 if (bp->attn_state & asserted)
2476 BNX2X_ERR("IGU ERROR\n");
2478 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479 aeu_mask = REG_RD(bp, aeu_addr);
2481 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2482 aeu_mask, asserted);
2483 aeu_mask &= ~(asserted & 0xff);
2484 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2486 REG_WR(bp, aeu_addr, aeu_mask);
2487 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2489 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2490 bp->attn_state |= asserted;
2491 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2493 if (asserted & ATTN_HARD_WIRED_MASK) {
2494 if (asserted & ATTN_NIG_FOR_FUNC) {
2496 bnx2x_acquire_phy_lock(bp);
2498 /* save nig interrupt mask */
2499 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2500 REG_WR(bp, nig_int_mask_addr, 0);
2502 bnx2x_link_attn(bp);
2504 /* handle unicore attn? */
2506 if (asserted & ATTN_SW_TIMER_4_FUNC)
2507 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2509 if (asserted & GPIO_2_FUNC)
2510 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2512 if (asserted & GPIO_3_FUNC)
2513 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2515 if (asserted & GPIO_4_FUNC)
2516 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2519 if (asserted & ATTN_GENERAL_ATTN_1) {
2520 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2521 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2523 if (asserted & ATTN_GENERAL_ATTN_2) {
2524 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2525 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2527 if (asserted & ATTN_GENERAL_ATTN_3) {
2528 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2529 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2532 if (asserted & ATTN_GENERAL_ATTN_4) {
2533 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2534 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2536 if (asserted & ATTN_GENERAL_ATTN_5) {
2537 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2538 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2540 if (asserted & ATTN_GENERAL_ATTN_6) {
2541 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2542 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2546 } /* if hardwired */
2548 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549 asserted, hc_addr);
2550 REG_WR(bp, hc_addr, asserted);
2552 /* now set back the mask */
2553 if (asserted & ATTN_NIG_FOR_FUNC) {
2554 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2555 bnx2x_release_phy_lock(bp);
2559 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2561 int port = BP_PORT(bp);
2565 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2566 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2568 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2570 val = REG_RD(bp, reg_offset);
2571 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2572 REG_WR(bp, reg_offset, val);
2574 BNX2X_ERR("SPIO5 hw attention\n");
2576 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2577 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2578 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2579 /* Fan failure attention */
2581 /* The PHY reset is controlled by GPIO 1 */
2582 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2583 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2584 /* Low power mode is controlled by GPIO 2 */
2585 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2586 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2587 /* mark the failure */
2588 bp->link_params.ext_phy_config &=
2589 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2590 bp->link_params.ext_phy_config |=
2591 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2592 SHMEM_WR(bp,
2593 dev_info.port_hw_config[port].
2594 external_phy_config,
2595 bp->link_params.ext_phy_config);
2596 /* log the failure */
2597 printk(KERN_ERR PFX "Fan Failure on Network"
2598 " Controller %s has caused the driver to"
2599 " shutdown the card to prevent permanent"
2600 " damage. Please contact Dell Support for"
2601 " assistance\n", bp->dev->name);
2609 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2611 val = REG_RD(bp, reg_offset);
2612 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2613 REG_WR(bp, reg_offset, val);
2615 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2616 (attn & HW_INTERRUT_ASSERT_SET_0));
2621 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2625 if (attn & BNX2X_DOORQ_ASSERT) {
2627 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2628 BNX2X_ERR("DB hw attention 0x%x\n", val);
2629 /* DORQ discard attention */
2631 BNX2X_ERR("FATAL error from DORQ\n");
2634 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2636 int port = BP_PORT(bp);
2639 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2640 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2642 val = REG_RD(bp, reg_offset);
2643 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2644 REG_WR(bp, reg_offset, val);
2646 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2647 (attn & HW_INTERRUT_ASSERT_SET_1));
2652 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2656 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2658 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2659 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2660 /* CFC error attention */
2661 if (val & 0x2)
2662 BNX2X_ERR("FATAL error from CFC\n");
2665 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2667 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2668 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2669 /* RQ_USDMDP_FIFO_OVERFLOW */
2670 if (val & 0x18000)
2671 BNX2X_ERR("FATAL error from PXP\n");
2674 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2676 int port = BP_PORT(bp);
2679 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2680 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2682 val = REG_RD(bp, reg_offset);
2683 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2684 REG_WR(bp, reg_offset, val);
2686 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2687 (attn & HW_INTERRUT_ASSERT_SET_2));
2692 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2696 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2698 if (attn & BNX2X_PMF_LINK_ASSERT) {
2699 int func = BP_FUNC(bp);
2701 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2702 bnx2x__link_status_update(bp);
2703 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2704 DRV_STATUS_PMF)
2705 bnx2x_pmf_update(bp);
2707 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2709 BNX2X_ERR("MC assert!\n");
2710 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2711 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2712 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2713 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2716 } else if (attn & BNX2X_MCP_ASSERT) {
2718 BNX2X_ERR("MCP assert!\n");
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2723 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2726 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2727 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2728 if (attn & BNX2X_GRC_TIMEOUT) {
2729 val = CHIP_IS_E1H(bp) ?
2730 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2731 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2733 if (attn & BNX2X_GRC_RSV) {
2734 val = CHIP_IS_E1H(bp) ?
2735 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2736 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2738 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2742 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2744 struct attn_route attn;
2745 struct attn_route group_mask;
2746 int port = BP_PORT(bp);
2752 /* need to take HW lock because MCP or other port might also
2753 try to handle this event */
2754 bnx2x_acquire_alr(bp);
2756 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2757 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2758 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2759 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2760 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2761 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2763 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2764 if (deasserted & (1 << index)) {
2765 group_mask = bp->attn_group[index];
2767 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2768 index, group_mask.sig[0], group_mask.sig[1],
2769 group_mask.sig[2], group_mask.sig[3]);
2771 bnx2x_attn_int_deasserted3(bp,
2772 attn.sig[3] & group_mask.sig[3]);
2773 bnx2x_attn_int_deasserted1(bp,
2774 attn.sig[1] & group_mask.sig[1]);
2775 bnx2x_attn_int_deasserted2(bp,
2776 attn.sig[2] & group_mask.sig[2]);
2777 bnx2x_attn_int_deasserted0(bp,
2778 attn.sig[0] & group_mask.sig[0]);
2780 if ((attn.sig[0] & group_mask.sig[0] &
2781 HW_PRTY_ASSERT_SET_0) ||
2782 (attn.sig[1] & group_mask.sig[1] &
2783 HW_PRTY_ASSERT_SET_1) ||
2784 (attn.sig[2] & group_mask.sig[2] &
2785 HW_PRTY_ASSERT_SET_2))
2786 BNX2X_ERR("FATAL HW block parity attention\n");
2790 bnx2x_release_alr(bp);
2792 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2794 val = ~deasserted;
2795 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2796 val, reg_addr);
2797 REG_WR(bp, reg_addr, val);
2799 if (~bp->attn_state & deasserted)
2800 BNX2X_ERR("IGU ERROR\n");
2802 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2803 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2805 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806 aeu_mask = REG_RD(bp, reg_addr);
2808 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2809 aeu_mask, deasserted);
2810 aeu_mask |= (deasserted & 0xff);
2811 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2813 REG_WR(bp, reg_addr, aeu_mask);
2814 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2816 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2817 bp->attn_state &= ~deasserted;
2818 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2821 static void bnx2x_attn_int(struct bnx2x *bp)
2823 /* read local copy of bits */
2824 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2825 attn_bits);
2826 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2827 attn_bits_ack);
2830 /* look for changed bits */
2831 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2832 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2835 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2836 attn_bits, attn_ack, asserted, deasserted);
2838 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2839 BNX2X_ERR("BAD attention state\n");
2841 /* handle bits that were raised */
2842 if (asserted)
2843 bnx2x_attn_int_asserted(bp, asserted);
2845 if (deasserted)
2846 bnx2x_attn_int_deasserted(bp, deasserted);
2849 static void bnx2x_sp_task(struct work_struct *work)
2851 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2855 /* Return here if interrupt is disabled */
2856 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2857 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2858 return;
2859 }
2861 status = bnx2x_update_dsb_idx(bp);
2862 /* if (status == 0) */
2863 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2865 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2871 /* CStorm events: query_stats, port delete ramrod */
2873 bp->stats_pending = 0;
2875 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2876 IGU_INT_NOP, 1);
2877 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2878 IGU_INT_NOP, 1);
2879 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2880 IGU_INT_NOP, 1);
2881 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2882 IGU_INT_NOP, 1);
2883 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2884 IGU_INT_ENABLE, 1);
2888 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2890 struct net_device *dev = dev_instance;
2891 struct bnx2x *bp = netdev_priv(dev);
2893 /* Return here if interrupt is disabled */
2894 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2895 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2896 return IRQ_HANDLED;
2897 }
2899 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2901 #ifdef BNX2X_STOP_ON_ERROR
2902 if (unlikely(bp->panic))
2906 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2908 return IRQ_HANDLED;
2911 /* end of slow path */
2915 /****************************************************************************
2917 ****************************************************************************/
2919 /* sum[hi:lo] += add[hi:lo] */
2920 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2921 do { \
2922 s_lo += a_lo; \
2923 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2924 } while (0)
2926 /* difference = minuend - subtrahend */
2927 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2928 do { \
2929 if (m_lo < s_lo) { \
2930 /* underflow */ \
2931 d_hi = m_hi - s_hi; \
2932 if (d_hi > 0) { \
2933 /* we can 'loan' 1 */ \
2934 d_hi--; \
2935 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2936 } else { \
2937 /* m_hi <= s_hi */ \
2938 d_hi = 0; \
2939 d_lo = 0; \
2940 } \
2941 } else { \
2942 /* m_lo >= s_lo */ \
2943 if (m_hi < s_hi) { \
2944 d_hi = 0; \
2945 d_lo = 0; \
2946 } else { \
2947 /* m_hi >= s_hi */ \
2948 d_hi = m_hi - s_hi; \
2949 d_lo = m_lo - s_lo; \
2950 } \
2951 } \
2952 } while (0)
2954 #define UPDATE_STAT64(s, t) \
2955 do { \
2956 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2957 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2958 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2959 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2960 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2961 pstats->mac_stx[1].t##_lo, diff.lo); \
2962 } while (0)
2964 #define UPDATE_STAT64_NIG(s, t) \
2965 do { \
2966 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2967 diff.lo, new->s##_lo, old->s##_lo); \
2968 ADD_64(estats->t##_hi, diff.hi, \
2969 estats->t##_lo, diff.lo); \
2970 } while (0)
2972 /* sum[hi:lo] += add */
2973 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2974 do { \
2975 s_lo += a; \
2976 s_hi += (s_lo < a) ? 1 : 0; \
2977 } while (0)
2979 #define UPDATE_EXTEND_STAT(s) \
2980 do { \
2981 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2982 pstats->mac_stx[1].s##_lo, \
2983 new->s); \
2984 } while (0)
2986 #define UPDATE_EXTEND_TSTAT(s, t) \
2987 do { \
2988 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2989 old_tclient->s = le32_to_cpu(tclient->s); \
2990 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2991 } while (0)
2993 #define UPDATE_EXTEND_XSTAT(s, t) \
2994 do { \
2995 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2996 old_xclient->s = le32_to_cpu(xclient->s); \
2997 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2998 } while (0)
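/* A worked example for the 64-bit helpers above (illustrative values):
   adding a = 0x00000000:00000001 to s = 0x00000001:ffffffff wraps s_lo
   to 0, the (s_lo < a_lo) test detects the carry, and s becomes
   0x00000002:00000000.  DIFF_64 mirrors this with a borrow: when
   m_lo < s_lo it takes one from the high dword via UINT_MAX, and it
   clamps the result to 0:0 whenever the minuend is smaller overall. */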
3001 * General service functions
3004 static inline long bnx2x_hilo(u32 *hiref)
3005 {
3006 u32 lo = *(hiref + 1);
3007 #if (BITS_PER_LONG == 64)
3008 u32 hi = *hiref;
3010 return HILO_U64(hi, lo);
3011 #else
3012 return lo;
3013 #endif
3014 }
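/* HILO_U64(hi, lo) recombines the two halves as (((u64)hi << 32) + lo);
   on 32-bit kernels only the low dword is returned, presumably because
   the value ends up in long-sized net_device_stats counters anyway. */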
3017 * Init service functions
3020 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3022 if (!bp->stats_pending) {
3023 struct eth_query_ramrod_data ramrod_data = {0};
3026 ramrod_data.drv_counter = bp->stats_counter++;
3027 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3028 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3030 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3031 ((u32 *)&ramrod_data)[1],
3032 ((u32 *)&ramrod_data)[0], 0);
3034 /* stats ramrod has its own slot on the spq */
3035 if (rc == 0)
3036 bp->stats_pending = 1;
3041 static void bnx2x_stats_init(struct bnx2x *bp)
3043 int port = BP_PORT(bp);
3045 bp->executer_idx = 0;
3046 bp->stats_counter = 0;
3049 if (!BP_NOMCP(bp))
3050 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3051 else
3052 bp->port.port_stx = 0;
3053 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3055 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3056 bp->port.old_nig_stats.brb_discard =
3057 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3058 bp->port.old_nig_stats.brb_truncate =
3059 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3060 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3061 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3062 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3063 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3065 /* function stats */
3066 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3067 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3068 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3069 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3071 bp->stats_state = STATS_STATE_DISABLED;
3072 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3073 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3076 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3078 struct dmae_command *dmae = &bp->stats_dmae;
3079 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3081 *stats_comp = DMAE_COMP_VAL;
3084 if (bp->executer_idx) {
3085 int loader_idx = PMF_DMAE_C(bp);
3087 memset(dmae, 0, sizeof(struct dmae_command));
3089 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3090 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3091 DMAE_CMD_DST_RESET |
3093 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3095 DMAE_CMD_ENDIANITY_DW_SWAP |
3097 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3099 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3100 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3101 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3102 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3103 sizeof(struct dmae_command) *
3104 (loader_idx + 1)) >> 2;
3105 dmae->dst_addr_hi = 0;
3106 dmae->len = sizeof(struct dmae_command) >> 2;
3109 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3110 dmae->comp_addr_hi = 0;
3114 bnx2x_post_dmae(bp, dmae, loader_idx);
3116 } else if (bp->func_stx) {
3118 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
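/* When several DMAE commands have been staged (bp->executer_idx != 0),
   the command built above acts as a loader: its destination is the
   DMAE command memory at slot loader_idx + 1 and its completion writes
   that slot's "go" register, so the engine loads and launches the
   staged list without further CPU involvement. */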
3122 static int bnx2x_stats_comp(struct bnx2x *bp)
3124 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125 int cnt = 10;
3128 while (*stats_comp != DMAE_COMP_VAL) {
3129 if (!cnt) {
3130 BNX2X_ERR("timeout waiting for stats finished\n");
3131 break;
3132 }
3133 cnt--;
3134 msleep(1);
3135 }
3136 return 1;
3140 * Statistics service functions
3143 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3145 struct dmae_command *dmae;
3146 u32 opcode;
3147 int loader_idx = PMF_DMAE_C(bp);
3148 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3151 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3152 BNX2X_ERR("BUG!\n");
3156 bp->executer_idx = 0;
3158 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3160 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3162 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3164 DMAE_CMD_ENDIANITY_DW_SWAP |
3166 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3167 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3169 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3171 dmae->src_addr_lo = bp->port.port_stx >> 2;
3172 dmae->src_addr_hi = 0;
3173 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3174 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3175 dmae->len = DMAE_LEN32_RD_MAX;
3176 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3177 dmae->comp_addr_hi = 0;
3180 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3182 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3183 dmae->src_addr_hi = 0;
3184 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3185 DMAE_LEN32_RD_MAX * 4);
3186 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3187 DMAE_LEN32_RD_MAX * 4);
3188 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3189 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3190 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3191 dmae->comp_val = DMAE_COMP_VAL;
3194 bnx2x_hw_stats_post(bp);
3195 bnx2x_stats_comp(bp);
3198 static void bnx2x_port_stats_init(struct bnx2x *bp)
3200 struct dmae_command *dmae;
3201 int port = BP_PORT(bp);
3202 int vn = BP_E1HVN(bp);
3204 int loader_idx = PMF_DMAE_C(bp);
3206 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3209 if (!bp->link_vars.link_up || !bp->port.pmf) {
3210 BNX2X_ERR("BUG!\n");
3214 bp->executer_idx = 0;
3217 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3218 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3219 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3221 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3223 DMAE_CMD_ENDIANITY_DW_SWAP |
3225 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3226 (vn << DMAE_CMD_E1HVN_SHIFT));
3228 if (bp->port.port_stx) {
3230 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231 dmae->opcode = opcode;
3232 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3233 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3234 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3235 dmae->dst_addr_hi = 0;
3236 dmae->len = sizeof(struct host_port_stats) >> 2;
3237 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238 dmae->comp_addr_hi = 0;
3244 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3245 dmae->opcode = opcode;
3246 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3247 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3248 dmae->dst_addr_lo = bp->func_stx >> 2;
3249 dmae->dst_addr_hi = 0;
3250 dmae->len = sizeof(struct host_func_stats) >> 2;
3251 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252 dmae->comp_addr_hi = 0;
3257 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3258 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3259 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3261 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3263 DMAE_CMD_ENDIANITY_DW_SWAP |
3265 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3266 (vn << DMAE_CMD_E1HVN_SHIFT));
3268 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3270 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3271 NIG_REG_INGRESS_BMAC0_MEM);
3273 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3274 BIGMAC_REGISTER_TX_STAT_GTBYT */
3275 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276 dmae->opcode = opcode;
3277 dmae->src_addr_lo = (mac_addr +
3278 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3279 dmae->src_addr_hi = 0;
3280 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3281 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3282 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3283 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3284 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285 dmae->comp_addr_hi = 0;
3288 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3289 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3290 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291 dmae->opcode = opcode;
3292 dmae->src_addr_lo = (mac_addr +
3293 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3294 dmae->src_addr_hi = 0;
3295 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3296 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3297 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3298 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3299 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3300 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3301 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302 dmae->comp_addr_hi = 0;
3305 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3307 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3309 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3310 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311 dmae->opcode = opcode;
3312 dmae->src_addr_lo = (mac_addr +
3313 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3314 dmae->src_addr_hi = 0;
3315 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3316 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3317 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3318 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319 dmae->comp_addr_hi = 0;
3322 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324 dmae->opcode = opcode;
3325 dmae->src_addr_lo = (mac_addr +
3326 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3327 dmae->src_addr_hi = 0;
3328 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3329 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3330 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3331 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3333 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334 dmae->comp_addr_hi = 0;
3337 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3338 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339 dmae->opcode = opcode;
3340 dmae->src_addr_lo = (mac_addr +
3341 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3342 dmae->src_addr_hi = 0;
3343 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3344 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3346 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3347 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3348 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349 dmae->comp_addr_hi = 0;
3354 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355 dmae->opcode = opcode;
3356 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3357 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3358 dmae->src_addr_hi = 0;
3359 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3360 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3361 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367 dmae->opcode = opcode;
3368 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3369 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3370 dmae->src_addr_hi = 0;
3371 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3372 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3373 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3374 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3375 dmae->len = (2*sizeof(u32)) >> 2;
3376 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377 dmae->comp_addr_hi = 0;
3380 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3381 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3382 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3383 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3385 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3387 DMAE_CMD_ENDIANITY_DW_SWAP |
3389 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3390 (vn << DMAE_CMD_E1HVN_SHIFT));
3391 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3392 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3393 dmae->src_addr_hi = 0;
3394 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3395 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3396 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3397 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3398 dmae->len = (2*sizeof(u32)) >> 2;
3399 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401 dmae->comp_val = DMAE_COMP_VAL;
3406 static void bnx2x_func_stats_init(struct bnx2x *bp)
3408 struct dmae_command *dmae = &bp->stats_dmae;
3409 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3412 if (!bp->func_stx) {
3413 BNX2X_ERR("BUG!\n");
3417 bp->executer_idx = 0;
3418 memset(dmae, 0, sizeof(struct dmae_command));
3420 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3422 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3424 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3426 DMAE_CMD_ENDIANITY_DW_SWAP |
3428 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3429 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3430 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3431 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3432 dmae->dst_addr_lo = bp->func_stx >> 2;
3433 dmae->dst_addr_hi = 0;
3434 dmae->len = sizeof(struct host_func_stats) >> 2;
3435 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3436 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3437 dmae->comp_val = DMAE_COMP_VAL;
3442 static void bnx2x_stats_start(struct bnx2x *bp)
3444 if (bp->port.pmf)
3445 bnx2x_port_stats_init(bp);
3447 else if (bp->func_stx)
3448 bnx2x_func_stats_init(bp);
3450 bnx2x_hw_stats_post(bp);
3451 bnx2x_storm_stats_post(bp);
3454 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3456 bnx2x_stats_comp(bp);
3457 bnx2x_stats_pmf_update(bp);
3458 bnx2x_stats_start(bp);
3461 static void bnx2x_stats_restart(struct bnx2x *bp)
3463 bnx2x_stats_comp(bp);
3464 bnx2x_stats_start(bp);
3467 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3469 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3470 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3471 struct regpair diff;
3473 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3474 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3475 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3476 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3477 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3478 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3479 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3480 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3481 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3482 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3483 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3484 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3485 UPDATE_STAT64(tx_stat_gt127,
3486 tx_stat_etherstatspkts65octetsto127octets);
3487 UPDATE_STAT64(tx_stat_gt255,
3488 tx_stat_etherstatspkts128octetsto255octets);
3489 UPDATE_STAT64(tx_stat_gt511,
3490 tx_stat_etherstatspkts256octetsto511octets);
3491 UPDATE_STAT64(tx_stat_gt1023,
3492 tx_stat_etherstatspkts512octetsto1023octets);
3493 UPDATE_STAT64(tx_stat_gt1518,
3494 tx_stat_etherstatspkts1024octetsto1522octets);
3495 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3496 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3497 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3498 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3499 UPDATE_STAT64(tx_stat_gterr,
3500 tx_stat_dot3statsinternalmactransmiterrors);
3501 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3504 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3506 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3507 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3509 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3510 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3511 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3512 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3513 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3514 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3515 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3516 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3517 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3518 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3519 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3520 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3521 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3522 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3523 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3524 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3525 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3526 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3527 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3528 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3529 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3530 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3531 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3532 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3533 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3534 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3535 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3536 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3537 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3538 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3539 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3542 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3544 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3545 struct nig_stats *old = &(bp->port.old_nig_stats);
3546 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548 struct regpair diff;
3550 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3551 bnx2x_bmac_stats_update(bp);
3553 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3554 bnx2x_emac_stats_update(bp);
3556 else { /* unreached */
3557 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3558 return -1;
3559 }
3561 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3562 new->brb_discard - old->brb_discard);
3563 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3564 new->brb_truncate - old->brb_truncate);
3566 UPDATE_STAT64_NIG(egress_mac_pkt0,
3567 etherstatspkts1024octetsto1522octets);
3568 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3570 memcpy(old, new, sizeof(struct nig_stats));
3572 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3573 sizeof(struct mac_stx));
3574 estats->brb_drop_hi = pstats->brb_drop_hi;
3575 estats->brb_drop_lo = pstats->brb_drop_lo;
3577 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3579 return 0;
3582 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3584 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3585 int cl_id = BP_CL_ID(bp);
3586 struct tstorm_per_port_stats *tport =
3587 &stats->tstorm_common.port_statistics;
3588 struct tstorm_per_client_stats *tclient =
3589 &stats->tstorm_common.client_statistics[cl_id];
3590 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3591 struct xstorm_per_client_stats *xclient =
3592 &stats->xstorm_common.client_statistics[cl_id];
3593 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3594 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3595 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3598 /* are storm stats valid? */
3599 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3600 bp->stats_counter) {
3601 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3602 " tstorm counter (%d) != stats_counter (%d)\n",
3603 tclient->stats_counter, bp->stats_counter);
3604 return -1;
3605 }
3606 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3607 bp->stats_counter) {
3608 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3609 " xstorm counter (%d) != stats_counter (%d)\n",
3610 xclient->stats_counter, bp->stats_counter);
3611 return -2;
3612 }
3614 fstats->total_bytes_received_hi =
3615 fstats->valid_bytes_received_hi =
3616 le32_to_cpu(tclient->total_rcv_bytes.hi);
3617 fstats->total_bytes_received_lo =
3618 fstats->valid_bytes_received_lo =
3619 le32_to_cpu(tclient->total_rcv_bytes.lo);
3621 estats->error_bytes_received_hi =
3622 le32_to_cpu(tclient->rcv_error_bytes.hi);
3623 estats->error_bytes_received_lo =
3624 le32_to_cpu(tclient->rcv_error_bytes.lo);
3625 ADD_64(estats->error_bytes_received_hi,
3626 estats->rx_stat_ifhcinbadoctets_hi,
3627 estats->error_bytes_received_lo,
3628 estats->rx_stat_ifhcinbadoctets_lo);
3630 ADD_64(fstats->total_bytes_received_hi,
3631 estats->error_bytes_received_hi,
3632 fstats->total_bytes_received_lo,
3633 estats->error_bytes_received_lo);
3635 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3636 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3637 total_multicast_packets_received);
3638 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3639 total_broadcast_packets_received);
3641 fstats->total_bytes_transmitted_hi =
3642 le32_to_cpu(xclient->total_sent_bytes.hi);
3643 fstats->total_bytes_transmitted_lo =
3644 le32_to_cpu(xclient->total_sent_bytes.lo);
3646 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3647 total_unicast_packets_transmitted);
3648 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3649 total_multicast_packets_transmitted);
3650 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3651 total_broadcast_packets_transmitted);
3653 memcpy(estats, &(fstats->total_bytes_received_hi),
3654 sizeof(struct host_func_stats) - 2*sizeof(u32));
3656 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3657 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3658 estats->brb_truncate_discard =
3659 le32_to_cpu(tport->brb_truncate_discard);
3660 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3662 old_tclient->rcv_unicast_bytes.hi =
3663 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3664 old_tclient->rcv_unicast_bytes.lo =
3665 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3666 old_tclient->rcv_broadcast_bytes.hi =
3667 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3668 old_tclient->rcv_broadcast_bytes.lo =
3669 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3670 old_tclient->rcv_multicast_bytes.hi =
3671 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3672 old_tclient->rcv_multicast_bytes.lo =
3673 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3674 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3676 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3677 old_tclient->packets_too_big_discard =
3678 le32_to_cpu(tclient->packets_too_big_discard);
3679 estats->no_buff_discard =
3680 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3681 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3683 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3684 old_xclient->unicast_bytes_sent.hi =
3685 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3686 old_xclient->unicast_bytes_sent.lo =
3687 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3688 old_xclient->multicast_bytes_sent.hi =
3689 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3690 old_xclient->multicast_bytes_sent.lo =
3691 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3692 old_xclient->broadcast_bytes_sent.hi =
3693 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3694 old_xclient->broadcast_bytes_sent.lo =
3695 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3697 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3699 return 0;
3702 static void bnx2x_net_stats_update(struct bnx2x *bp)
3704 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3705 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3706 struct net_device_stats *nstats = &bp->dev->stats;
3708 nstats->rx_packets =
3709 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3710 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3711 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3713 nstats->tx_packets =
3714 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3715 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3716 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3718 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3720 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3722 nstats->rx_dropped = old_tclient->checksum_discard +
3723 estats->mac_discard;
3724 nstats->tx_dropped = 0;
3726 nstats->multicast =
3727 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3729 nstats->collisions =
3730 estats->tx_stat_dot3statssinglecollisionframes_lo +
3731 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3732 estats->tx_stat_dot3statslatecollisions_lo +
3733 estats->tx_stat_dot3statsexcessivecollisions_lo;
3735 estats->jabber_packets_received =
3736 old_tclient->packets_too_big_discard +
3737 estats->rx_stat_dot3statsframestoolong_lo;
3739 nstats->rx_length_errors =
3740 estats->rx_stat_etherstatsundersizepkts_lo +
3741 estats->jabber_packets_received;
3742 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3743 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3744 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3745 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3746 nstats->rx_missed_errors = estats->xxoverflow_discard;
3748 nstats->rx_errors = nstats->rx_length_errors +
3749 nstats->rx_over_errors +
3750 nstats->rx_crc_errors +
3751 nstats->rx_frame_errors +
3752 nstats->rx_fifo_errors +
3753 nstats->rx_missed_errors;
3755 nstats->tx_aborted_errors =
3756 estats->tx_stat_dot3statslatecollisions_lo +
3757 estats->tx_stat_dot3statsexcessivecollisions_lo;
3758 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3759 nstats->tx_fifo_errors = 0;
3760 nstats->tx_heartbeat_errors = 0;
3761 nstats->tx_window_errors = 0;
3763 nstats->tx_errors = nstats->tx_aborted_errors +
3764 nstats->tx_carrier_errors;
3767 static void bnx2x_stats_update(struct bnx2x *bp)
3769 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3770 int update = 0;
3772 if (*stats_comp != DMAE_COMP_VAL)
3773 return;
3775 if (bp->port.pmf)
3776 update = (bnx2x_hw_stats_update(bp) == 0);
3778 update |= (bnx2x_storm_stats_update(bp) == 0);
3780 if (update)
3781 bnx2x_net_stats_update(bp);
3783 else {
3784 if (bp->stats_pending) {
3785 bp->stats_pending++;
3786 if (bp->stats_pending == 3) {
3787 BNX2X_ERR("stats were not updated for 3 consecutive polls\n");
3788 bnx2x_panic();
3789 return;
3790 }
3791 }
3792 }
3794 if (bp->msglevel & NETIF_MSG_TIMER) {
3795 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3796 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3797 struct net_device_stats *nstats = &bp->dev->stats;
3800 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3801 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3803 bnx2x_tx_avail(bp->fp),
3804 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3805 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3807 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3808 bp->fp->rx_comp_cons),
3809 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3810 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3811 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3812 estats->driver_xoff, estats->brb_drop_lo);
3813 printk(KERN_DEBUG "tstats: checksum_discard %u "
3814 "packets_too_big_discard %u no_buff_discard %u "
3815 "mac_discard %u mac_filter_discard %u "
3816 "xxovrflow_discard %u brb_truncate_discard %u "
3817 "ttl0_discard %u\n",
3818 old_tclient->checksum_discard,
3819 old_tclient->packets_too_big_discard,
3820 old_tclient->no_buff_discard, estats->mac_discard,
3821 estats->mac_filter_discard, estats->xxoverflow_discard,
3822 estats->brb_truncate_discard,
3823 old_tclient->ttl0_discard);
3825 for_each_queue(bp, i) {
3826 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3827 bnx2x_fp(bp, i, tx_pkt),
3828 bnx2x_fp(bp, i, rx_pkt),
3829 bnx2x_fp(bp, i, rx_calls));
3833 bnx2x_hw_stats_post(bp);
3834 bnx2x_storm_stats_post(bp);
3837 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3839 struct dmae_command *dmae;
3841 int loader_idx = PMF_DMAE_C(bp);
3842 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3844 bp->executer_idx = 0;
3846 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3848 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3850 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3852 DMAE_CMD_ENDIANITY_DW_SWAP |
3854 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3855 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3857 if (bp->port.port_stx) {
3859 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860 if (bp->func_stx)
3861 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3862 else
3863 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3864 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3865 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3866 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3867 dmae->dst_addr_hi = 0;
3868 dmae->len = sizeof(struct host_port_stats) >> 2;
3869 if (bp->func_stx) {
3870 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3871 dmae->comp_addr_hi = 0;
3872 dmae->comp_val = 1;
3873 } else {
3874 dmae->comp_addr_lo =
3875 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3876 dmae->comp_addr_hi =
3877 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3878 dmae->comp_val = DMAE_COMP_VAL;
3880 *stats_comp = 0;
3881 }
3882 }
3886 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3887 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3888 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3889 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3890 dmae->dst_addr_lo = bp->func_stx >> 2;
3891 dmae->dst_addr_hi = 0;
3892 dmae->len = sizeof(struct host_func_stats) >> 2;
3893 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3895 dmae->comp_val = DMAE_COMP_VAL;
3901 static void bnx2x_stats_stop(struct bnx2x *bp)
3903 int update = 0;
3905 bnx2x_stats_comp(bp);
3907 if (bp->port.pmf)
3908 update = (bnx2x_hw_stats_update(bp) == 0);
3910 update |= (bnx2x_storm_stats_update(bp) == 0);
3912 if (update) {
3913 bnx2x_net_stats_update(bp);
3915 if (bp->port.pmf)
3916 bnx2x_port_stats_stop(bp);
3918 bnx2x_hw_stats_post(bp);
3919 bnx2x_stats_comp(bp);
3920 }
3923 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3927 static const struct {
3928 void (*action)(struct bnx2x *bp);
3929 enum bnx2x_stats_state next_state;
3930 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3931 /* state event */
3932 {
3933 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3934 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3935 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3936 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3937 },
3938 {
3939 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3940 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3941 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3942 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3943 }
3944 };
3946 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3948 enum bnx2x_stats_state state = bp->stats_state;
3950 bnx2x_stats_stm[state][event].action(bp);
3951 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3953 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3954 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3955 state, event, bp->stats_state);
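/* Example transition: in STATS_STATE_DISABLED a STATS_EVENT_LINK_UP
   runs bnx2x_stats_start() and moves the machine to ENABLED, so the
   bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP) call in bnx2x_link_attn()
   both launches the DMAE/storm queries and arms the ENABLED row for
   subsequent UPDATE events. */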
3958 static void bnx2x_timer(unsigned long data)
3960 struct bnx2x *bp = (struct bnx2x *) data;
3962 if (!netif_running(bp->dev))
3963 return;
3965 if (atomic_read(&bp->intr_sem) != 0)
3966 goto timer_restart;
3968 if (poll) {
3969 struct bnx2x_fastpath *fp = &bp->fp[0];
3970 int rc;
3972 bnx2x_tx_int(fp, 1000);
3973 rc = bnx2x_rx_int(fp, 1000);
3974 }
3976 if (!BP_NOMCP(bp)) {
3977 int func = BP_FUNC(bp);
3981 ++bp->fw_drv_pulse_wr_seq;
3982 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3983 /* TBD - add SYSTEM_TIME */
3984 drv_pulse = bp->fw_drv_pulse_wr_seq;
3985 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3987 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3988 MCP_PULSE_SEQ_MASK);
3989 /* The delta between driver pulse and mcp response
3990 * should be 1 (before mcp response) or 0 (after mcp response)
3991 */
3992 if ((drv_pulse != mcp_pulse) &&
3993 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3994 /* someone lost a heartbeat... */
3995 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3996 drv_pulse, mcp_pulse);
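/* A worked example: if the driver has just written drv_pulse = 0x020,
   an MCP that already answered reports mcp_pulse = 0x020, and one that
   is a beat behind reports 0x01f, which still satisfies
   drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK).  Any other
   combination is the lost heartbeat reported above. */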
4000 if ((bp->state == BNX2X_STATE_OPEN) ||
4001 (bp->state == BNX2X_STATE_DISABLED))
4002 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4004 timer_restart:
4005 mod_timer(&bp->timer, jiffies + bp->current_interval);
4008 /* end of Statistics */
4013 * nic init service functions
4016 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4018 int port = BP_PORT(bp);
4020 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4021 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4022 sizeof(struct ustorm_status_block)/4);
4023 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4024 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4025 sizeof(struct cstorm_status_block)/4);
4028 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4029 dma_addr_t mapping, int sb_id)
4031 int port = BP_PORT(bp);
4032 int func = BP_FUNC(bp);
4037 section = ((u64)mapping) + offsetof(struct host_status_block,
4038 u_status_block);
4039 sb->u_status_block.status_block_id = sb_id;
4041 REG_WR(bp, BAR_USTRORM_INTMEM +
4042 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4043 REG_WR(bp, BAR_USTRORM_INTMEM +
4044 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4045 U64_HI(section));
4046 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4047 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4049 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4050 REG_WR16(bp, BAR_USTRORM_INTMEM +
4051 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4054 section = ((u64)mapping) + offsetof(struct host_status_block,
4055 c_status_block);
4056 sb->c_status_block.status_block_id = sb_id;
4058 REG_WR(bp, BAR_CSTRORM_INTMEM +
4059 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4060 REG_WR(bp, BAR_CSTRORM_INTMEM +
4061 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4062 U64_HI(section));
4063 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4064 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4066 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4067 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4068 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4070 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4073 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4075 int func = BP_FUNC(bp);
4077 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4078 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4079 sizeof(struct ustorm_def_status_block)/4);
4080 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4081 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4082 sizeof(struct cstorm_def_status_block)/4);
4083 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4084 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4085 sizeof(struct xstorm_def_status_block)/4);
4086 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4087 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4088 sizeof(struct tstorm_def_status_block)/4);
4091 static void bnx2x_init_def_sb(struct bnx2x *bp,
4092 struct host_def_status_block *def_sb,
4093 dma_addr_t mapping, int sb_id)
4095 int port = BP_PORT(bp);
4096 int func = BP_FUNC(bp);
4097 int index, val, reg_offset;
4101 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4102 atten_status_block);
4103 def_sb->atten_status_block.status_block_id = sb_id;
4107 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4108 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4110 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4111 bp->attn_group[index].sig[0] = REG_RD(bp,
4112 reg_offset + 0x10*index);
4113 bp->attn_group[index].sig[1] = REG_RD(bp,
4114 reg_offset + 0x4 + 0x10*index);
4115 bp->attn_group[index].sig[2] = REG_RD(bp,
4116 reg_offset + 0x8 + 0x10*index);
4117 bp->attn_group[index].sig[3] = REG_RD(bp,
4118 reg_offset + 0xc + 0x10*index);
4121 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4122 HC_REG_ATTN_MSG0_ADDR_L);
4124 REG_WR(bp, reg_offset, U64_LO(section));
4125 REG_WR(bp, reg_offset + 4, U64_HI(section));
4127 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4129 val = REG_RD(bp, reg_offset);
4130 val |= sb_id;
4131 REG_WR(bp, reg_offset, val);
4134 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135 u_def_status_block);
4136 def_sb->u_def_status_block.status_block_id = sb_id;
4138 REG_WR(bp, BAR_USTRORM_INTMEM +
4139 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140 REG_WR(bp, BAR_USTRORM_INTMEM +
4141 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142 U64_HI(section));
4143 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4144 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4146 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4147 REG_WR16(bp, BAR_USTRORM_INTMEM +
4148 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4151 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152 c_def_status_block);
4153 def_sb->c_def_status_block.status_block_id = sb_id;
4155 REG_WR(bp, BAR_CSTRORM_INTMEM +
4156 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157 REG_WR(bp, BAR_CSTRORM_INTMEM +
4158 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159 U64_HI(section));
4160 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4161 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4163 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4164 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4165 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4168 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4169 t_def_status_block);
4170 def_sb->t_def_status_block.status_block_id = sb_id;
4172 REG_WR(bp, BAR_TSTRORM_INTMEM +
4173 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4174 REG_WR(bp, BAR_TSTRORM_INTMEM +
4175 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4176 U64_HI(section));
4177 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4178 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4180 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4181 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4182 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4185 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4186 x_def_status_block);
4187 def_sb->x_def_status_block.status_block_id = sb_id;
4189 REG_WR(bp, BAR_XSTRORM_INTMEM +
4190 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4191 REG_WR(bp, BAR_XSTRORM_INTMEM +
4192 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4193 U64_HI(section));
4194 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4195 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4197 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4198 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4199 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4201 bp->stats_pending = 0;
4202 bp->set_mac_pending = 0;
4204 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
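/* Push the current rx_ticks/tx_ticks coalescing values into the HC
 * timeout registers of each queue's status block; a tick value of 0
 * sets the corresponding *_HC_DISABLE flag, i.e. interrupt coalescing
 * is turned off for that index.
 */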
4207 static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 int port = BP_PORT(bp);
4212 for_each_queue(bp, i) {
4213 int sb_id = bp->fp[i].sb_id;
4215 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4216 REG_WR8(bp, BAR_USTRORM_INTMEM +
4217 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4218 U_SB_ETH_RX_CQ_INDEX),
4219 bp->rx_ticks/12);
4220 REG_WR16(bp, BAR_USTRORM_INTMEM +
4221 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4222 U_SB_ETH_RX_CQ_INDEX),
4223 bp->rx_ticks ? 0 : 1);
4224 REG_WR16(bp, BAR_USTRORM_INTMEM +
4225 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 U_SB_ETH_RX_BD_INDEX),
4227 bp->rx_ticks ? 0 : 1);
4229 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4230 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4231 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4232 C_SB_ETH_TX_CQ_INDEX),
4233 bp->tx_ticks/12);
4234 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4236 C_SB_ETH_TX_CQ_INDEX),
4237 bp->tx_ticks ? 0 : 1);
4241 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4242 struct bnx2x_fastpath *fp, int last)
4246 for (i = 0; i < last; i++) {
4247 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4248 struct sk_buff *skb = rx_buf->skb;
4251 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4255 if (fp->tpa_state[i] == BNX2X_TPA_START)
4256 pci_unmap_single(bp->pdev,
4257 pci_unmap_addr(rx_buf, mapping),
4259 PCI_DMA_FROMDEVICE);
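/* RX ring setup: each ring is a chain of pages whose last descriptors
 * point at the next page, so producers are always advanced with the
 * NEXT_*_IDX() helpers to skip those link elements. When TPA is
 * enabled, one skb per aggregation queue is pre-allocated into the
 * TPA pool.
 */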
4266 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4268 int func = BP_FUNC(bp);
4269 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4270 ETH_MAX_AGGREGATION_QUEUES_E1H;
4271 u16 ring_prod, cqe_ring_prod;
4274 bp->rx_buf_size = bp->dev->mtu;
4275 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4276 BCM_RX_ETH_PAYLOAD_ALIGN;
4278 if (bp->flags & TPA_ENABLE_FLAG) {
4279 DP(NETIF_MSG_IFUP,
4280 "rx_buf_size %d effective_mtu %d\n",
4281 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4283 for_each_queue(bp, j) {
4284 struct bnx2x_fastpath *fp = &bp->fp[j];
4286 for (i = 0; i < max_agg_queues; i++) {
4287 fp->tpa_pool[i].skb =
4288 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4289 if (!fp->tpa_pool[i].skb) {
4290 BNX2X_ERR("Failed to allocate TPA "
4291 "skb pool for queue[%d] - "
4292 "disabling TPA on this "
4294 bnx2x_free_tpa_pool(bp, fp, i);
4295 fp->disable_tpa = 1;
4298 pci_unmap_addr_set((struct sw_rx_bd *)
4299 &bp->fp->tpa_pool[i],
4300 mapping, 0);
4301 fp->tpa_state[i] = BNX2X_TPA_STOP;
4306 for_each_queue(bp, j) {
4307 struct bnx2x_fastpath *fp = &bp->fp[j];
4310 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4311 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4313 /* "next page" elements initialization */
4315 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4316 struct eth_rx_sge *sge;
4318 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4319 sge->addr_hi =
4320 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4321 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322 sge->addr_lo =
4323 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4324 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4327 bnx2x_init_sge_ring_bit_mask(fp);
4330 for (i = 1; i <= NUM_RX_RINGS; i++) {
4331 struct eth_rx_bd *rx_bd;
4333 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4334 rx_bd->addr_hi =
4335 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4336 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4337 rx_bd->addr_lo =
4338 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4339 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4343 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4344 struct eth_rx_cqe_next_page *nextpg;
4346 nextpg = (struct eth_rx_cqe_next_page *)
4347 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4348 nextpg->addr_hi =
4349 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351 nextpg->addr_lo =
4352 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4353 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4356 /* Allocate SGEs and initialize the ring elements */
4357 for (i = 0, ring_prod = 0;
4358 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4360 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4361 BNX2X_ERR("was only able to allocate "
4363 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4364 /* Cleanup already allocated elements */
4365 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4366 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4367 fp->disable_tpa = 1;
4368 ring_prod = 0;
4369 break;
4370 }
4371 ring_prod = NEXT_SGE_IDX(ring_prod);
4373 fp->rx_sge_prod = ring_prod;
4375 /* Allocate BDs and initialize BD ring */
4376 fp->rx_comp_cons = 0;
4377 cqe_ring_prod = ring_prod = 0;
4378 for (i = 0; i < bp->rx_ring_size; i++) {
4379 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4380 BNX2X_ERR("was only able to allocate "
4382 bp->eth_stats.rx_skb_alloc_failed++;
4385 ring_prod = NEXT_RX_IDX(ring_prod);
4386 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4387 WARN_ON(ring_prod <= i);
4390 fp->rx_bd_prod = ring_prod;
4391 /* must not have more available CQEs than BDs */
4392 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4393 cqe_ring_prod);
4394 fp->rx_pkt = fp->rx_calls = 0;
4396 /* Warning!
4397 * this will generate an interrupt (to the TSTORM)
4398 * must only be done after chip is initialized
4399 */
4400 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4401 fp->rx_sge_prod);
4405 REG_WR(bp, BAR_USTRORM_INTMEM +
4406 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4407 U64_LO(fp->rx_comp_mapping));
4408 REG_WR(bp, BAR_USTRORM_INTMEM +
4409 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4410 U64_HI(fp->rx_comp_mapping));
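/* TX rings use the same next-page chaining scheme: the last BD of
 * each page holds the DMA address of the following page, wrapping
 * back to the first page after NUM_TX_RINGS.
 */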
4414 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4418 for_each_queue(bp, j) {
4419 struct bnx2x_fastpath *fp = &bp->fp[j];
4421 for (i = 1; i <= NUM_TX_RINGS; i++) {
4422 struct eth_tx_bd *tx_bd =
4423 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4425 tx_bd->addr_hi =
4426 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4427 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4428 tx_bd->addr_lo =
4429 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4430 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4433 fp->tx_pkt_prod = 0;
4434 fp->tx_pkt_cons = 0;
4435 fp->tx_bd_prod = 0;
4436 fp->tx_bd_cons = 0;
4437 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4442 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4444 int func = BP_FUNC(bp);
4446 spin_lock_init(&bp->spq_lock);
4448 bp->spq_left = MAX_SPQ_PENDING;
4449 bp->spq_prod_idx = 0;
4450 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4451 bp->spq_prod_bd = bp->spq;
4452 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4454 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4455 U64_LO(bp->spq_mapping));
4456 REG_WR(bp,
4457 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4458 U64_HI(bp->spq_mapping));
4460 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4461 bp->spq_prod_idx);
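/* Fill the per-connection ETH context: ring base addresses, doorbell
 * data, client and status block ids for each fastpath queue. The CDU
 * validation words are derived from the HW CID and region numbers.
 */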
4464 static void bnx2x_init_context(struct bnx2x *bp)
4468 for_each_queue(bp, i) {
4469 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4470 struct bnx2x_fastpath *fp = &bp->fp[i];
4471 u8 sb_id = FP_SB_ID(fp);
4473 context->xstorm_st_context.tx_bd_page_base_hi =
4474 U64_HI(fp->tx_desc_mapping);
4475 context->xstorm_st_context.tx_bd_page_base_lo =
4476 U64_LO(fp->tx_desc_mapping);
4477 context->xstorm_st_context.db_data_addr_hi =
4478 U64_HI(fp->tx_prods_mapping);
4479 context->xstorm_st_context.db_data_addr_lo =
4480 U64_LO(fp->tx_prods_mapping);
4481 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4482 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4484 context->ustorm_st_context.common.sb_index_numbers =
4485 BNX2X_RX_SB_INDEX_NUM;
4486 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4487 context->ustorm_st_context.common.status_block_id = sb_id;
4488 context->ustorm_st_context.common.flags =
4489 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4490 context->ustorm_st_context.common.mc_alignment_size =
4491 BCM_RX_ETH_PAYLOAD_ALIGN;
4492 context->ustorm_st_context.common.bd_buff_size =
4493 bp->rx_buf_size;
4494 context->ustorm_st_context.common.bd_page_base_hi =
4495 U64_HI(fp->rx_desc_mapping);
4496 context->ustorm_st_context.common.bd_page_base_lo =
4497 U64_LO(fp->rx_desc_mapping);
4498 if (!fp->disable_tpa) {
4499 context->ustorm_st_context.common.flags |=
4500 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4501 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4502 context->ustorm_st_context.common.sge_buff_size =
4503 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4504 context->ustorm_st_context.common.sge_page_base_hi =
4505 U64_HI(fp->rx_sge_mapping);
4506 context->ustorm_st_context.common.sge_page_base_lo =
4507 U64_LO(fp->rx_sge_mapping);
4510 context->cstorm_st_context.sb_index_number =
4511 C_SB_ETH_TX_CQ_INDEX;
4512 context->cstorm_st_context.status_block_id = sb_id;
4514 context->xstorm_ag_context.cdu_reserved =
4515 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516 CDU_REGION_NUMBER_XCM_AG,
4517 ETH_CONNECTION_TYPE);
4518 context->ustorm_ag_context.cdu_usage =
4519 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4520 CDU_REGION_NUMBER_UCM_AG,
4521 ETH_CONNECTION_TYPE);
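/* RSS indirection table: entry i simply maps to queue
 * (i % bp->num_queues), spreading flows round-robin over the enabled
 * fastpath queues.
 */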
4525 static void bnx2x_init_ind_table(struct bnx2x *bp)
4527 int port = BP_PORT(bp);
4533 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4534 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4535 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4536 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4537 i % bp->num_queues);
4539 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4542 static void bnx2x_set_client_config(struct bnx2x *bp)
4544 struct tstorm_eth_client_config tstorm_client = {0};
4545 int port = BP_PORT(bp);
4548 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4549 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4550 tstorm_client.config_flags =
4551 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4553 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4554 tstorm_client.config_flags |=
4555 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4556 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4560 if (bp->flags & TPA_ENABLE_FLAG) {
4561 tstorm_client.max_sges_for_packet =
4562 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4563 tstorm_client.max_sges_for_packet =
4564 ((tstorm_client.max_sges_for_packet +
4565 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4566 PAGES_PER_SGE_SHIFT;
4568 tstorm_client.config_flags |=
4569 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4572 for_each_queue(bp, i) {
4573 REG_WR(bp, BAR_TSTRORM_INTMEM +
4574 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4575 ((u32 *)&tstorm_client)[0]);
4576 REG_WR(bp, BAR_TSTRORM_INTMEM +
4577 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4578 ((u32 *)&tstorm_client)[1]);
4581 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4582 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
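/* The TSTORM MAC filter masks below are per-function bit masks
 * (1 << BP_L_ID(bp)); each drop_all/accept_all field selects the
 * unicast/multicast/broadcast policy for this function only.
 */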
4585 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4587 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4588 int mode = bp->rx_mode;
4589 int mask = (1 << BP_L_ID(bp));
4590 int func = BP_FUNC(bp);
4593 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4596 case BNX2X_RX_MODE_NONE: /* no Rx */
4597 tstorm_mac_filter.ucast_drop_all = mask;
4598 tstorm_mac_filter.mcast_drop_all = mask;
4599 tstorm_mac_filter.bcast_drop_all = mask;
4600 break;
4601 case BNX2X_RX_MODE_NORMAL:
4602 tstorm_mac_filter.bcast_accept_all = mask;
4603 break;
4604 case BNX2X_RX_MODE_ALLMULTI:
4605 tstorm_mac_filter.mcast_accept_all = mask;
4606 tstorm_mac_filter.bcast_accept_all = mask;
4607 break;
4608 case BNX2X_RX_MODE_PROMISC:
4609 tstorm_mac_filter.ucast_accept_all = mask;
4610 tstorm_mac_filter.mcast_accept_all = mask;
4611 tstorm_mac_filter.bcast_accept_all = mask;
4612 break;
4613 default:
4614 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4615 break;
4616 }
4618 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4619 REG_WR(bp, BAR_TSTRORM_INTMEM +
4620 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4621 ((u32 *)&tstorm_mac_filter)[i]);
4623 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4624 ((u32 *)&tstorm_mac_filter)[i]); */
4627 if (mode != BNX2X_RX_MODE_NONE)
4628 bnx2x_set_client_config(bp);
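/* Internal (storm RAM) initialization mirrors the HW init staging:
 * common part once per chip, port part once per port, function part
 * on every load. bnx2x_init_internal() dispatches on the MCP load
 * code, falling through so a COMMON load also covers PORT and FUNC.
 */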
4631 static void bnx2x_init_internal_common(struct bnx2x *bp)
4635 if (bp->flags & TPA_ENABLE_FLAG) {
4636 struct tstorm_eth_tpa_exist tpa = {0};
4640 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4642 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4646 /* Zero this manually as its initialization is
4647 currently missing in the initTool */
4648 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4649 REG_WR(bp, BAR_USTRORM_INTMEM +
4650 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4653 static void bnx2x_init_internal_port(struct bnx2x *bp)
4655 int port = BP_PORT(bp);
4657 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4658 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4659 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4660 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4663 static void bnx2x_init_internal_func(struct bnx2x *bp)
4665 struct tstorm_eth_function_common_config tstorm_config = {0};
4666 struct stats_indication_flags stats_flags = {0};
4667 int port = BP_PORT(bp);
4668 int func = BP_FUNC(bp);
4673 tstorm_config.config_flags = MULTI_FLAGS;
4674 tstorm_config.rss_result_mask = MULTI_MASK;
4677 tstorm_config.leading_client_id = BP_L_ID(bp);
4679 REG_WR(bp, BAR_TSTRORM_INTMEM +
4680 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4681 (*(u32 *)&tstorm_config));
4683 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4684 bnx2x_set_storm_rx_mode(bp);
4686 /* reset xstorm per client statistics */
4687 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4688 REG_WR(bp, BAR_XSTRORM_INTMEM +
4689 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4692 /* reset tstorm per client statistics */
4693 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4694 REG_WR(bp, BAR_TSTRORM_INTMEM +
4695 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4699 /* Init statistics related context */
4700 stats_flags.collect_eth = 1;
4702 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4703 ((u32 *)&stats_flags)[0]);
4704 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4705 ((u32 *)&stats_flags)[1]);
4707 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4708 ((u32 *)&stats_flags)[0]);
4709 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4710 ((u32 *)&stats_flags)[1]);
4712 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4713 ((u32 *)&stats_flags)[0]);
4714 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4715 ((u32 *)&stats_flags)[1]);
4717 REG_WR(bp, BAR_XSTRORM_INTMEM +
4718 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4719 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4720 REG_WR(bp, BAR_XSTRORM_INTMEM +
4721 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4722 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4724 REG_WR(bp, BAR_TSTRORM_INTMEM +
4725 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4726 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4727 REG_WR(bp, BAR_TSTRORM_INTMEM +
4728 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4729 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4731 if (CHIP_IS_E1H(bp)) {
4732 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4734 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4736 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4738 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4741 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4745 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4746 max_agg_size =
4747 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4748 SGE_PAGE_SIZE * PAGES_PER_SGE),
4750 for_each_queue(bp, i) {
4751 struct bnx2x_fastpath *fp = &bp->fp[i];
4753 REG_WR(bp, BAR_USTRORM_INTMEM +
4754 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4755 U64_LO(fp->rx_comp_mapping));
4756 REG_WR(bp, BAR_USTRORM_INTMEM +
4757 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4758 U64_HI(fp->rx_comp_mapping));
4760 REG_WR16(bp, BAR_USTRORM_INTMEM +
4761 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4762 max_agg_size);
4766 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4768 switch (load_code) {
4769 case FW_MSG_CODE_DRV_LOAD_COMMON:
4770 bnx2x_init_internal_common(bp);
4773 case FW_MSG_CODE_DRV_LOAD_PORT:
4774 bnx2x_init_internal_port(bp);
4777 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4778 bnx2x_init_internal_func(bp);
4782 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4787 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4791 for_each_queue(bp, i) {
4792 struct bnx2x_fastpath *fp = &bp->fp[i];
4795 fp->state = BNX2X_FP_STATE_CLOSED;
4797 fp->cl_id = BP_L_ID(bp) + i;
4798 fp->sb_id = fp->cl_id;
4800 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4801 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4802 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4804 bnx2x_update_fpsb_idx(fp);
4807 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4809 bnx2x_update_dsb_idx(bp);
4810 bnx2x_update_coalesce(bp);
4811 bnx2x_init_rx_rings(bp);
4812 bnx2x_init_tx_ring(bp);
4813 bnx2x_init_sp_ring(bp);
4814 bnx2x_init_context(bp);
4815 bnx2x_init_internal(bp, load_code);
4816 bnx2x_init_ind_table(bp);
4817 bnx2x_int_enable(bp);
4820 /* end of nic init */
4823 * gzip service functions
4826 static int bnx2x_gunzip_init(struct bnx2x *bp)
4828 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4829 &bp->gunzip_mapping);
4830 if (bp->gunzip_buf == NULL)
4831 goto gunzip_nomem1;
4833 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4834 if (bp->strm == NULL)
4835 goto gunzip_nomem2;
4837 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4839 if (bp->strm->workspace == NULL)
4840 goto gunzip_nomem3;
4842 return 0;
4844 gunzip_nomem3:
4845 kfree(bp->strm);
4846 bp->strm = NULL;
4848 gunzip_nomem2:
4849 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4850 bp->gunzip_mapping);
4851 bp->gunzip_buf = NULL;
4853 gunzip_nomem1:
4854 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4855 " decompression\n", bp->dev->name);
4856 return -ENOMEM;
4859 static void bnx2x_gunzip_end(struct bnx2x *bp)
4861 kfree(bp->strm->workspace);
4862 kfree(bp->strm);
4863 bp->strm = NULL;
4866 if (bp->gunzip_buf) {
4867 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4868 bp->gunzip_mapping);
4869 bp->gunzip_buf = NULL;
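/* bnx2x_gunzip() inflates a gzip-wrapped firmware image into
 * bp->gunzip_buf: it validates the magic bytes, skips the 10-byte
 * gzip header (plus the NUL-terminated file name when the FNAME flag
 * is set) and then runs a raw inflate (-MAX_WBITS, i.e. no zlib/gzip
 * framing) over the deflate payload.
 */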
4873 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4877 /* check gzip header */
4878 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4879 return -EINVAL;
4881 n = 10;
4883 #define FNAME 0x8
4885 if (zbuf[3] & FNAME)
4886 while ((zbuf[n++] != 0) && (n < len));
4888 bp->strm->next_in = zbuf + n;
4889 bp->strm->avail_in = len - n;
4890 bp->strm->next_out = bp->gunzip_buf;
4891 bp->strm->avail_out = FW_BUF_SIZE;
4893 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4897 rc = zlib_inflate(bp->strm, Z_FINISH);
4898 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4899 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4900 bp->dev->name, bp->strm->msg);
4902 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4903 if (bp->gunzip_outlen & 0x3)
4904 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4905 " gunzip_outlen (%d) not aligned\n",
4906 bp->dev->name, bp->gunzip_outlen);
4907 bp->gunzip_outlen >>= 2;
4909 zlib_inflateEnd(bp->strm);
4911 if (rc == Z_STREAM_END)
4912 return 0;
4913 return rc;
4917 /* nic load/unload */
4920 * General service functions
4923 /* send a NIG loopback debug packet */
4924 static void bnx2x_lb_pckt(struct bnx2x *bp)
4928 /* Ethernet source and destination addresses */
4929 wb_write[0] = 0x55555555;
4930 wb_write[1] = 0x55555555;
4931 wb_write[2] = 0x20; /* SOP */
4932 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4934 /* NON-IP protocol */
4935 wb_write[0] = 0x09000000;
4936 wb_write[1] = 0x55555555;
4937 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4938 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4941 /* Some of the internal memories
4942 * are not directly readable from the driver;
4943 * to test them we send debug packets.
4944 */
4945 static int bnx2x_int_mem_test(struct bnx2x *bp)
4951 if (CHIP_REV_IS_FPGA(bp))
4953 else if (CHIP_REV_IS_EMUL(bp))
4958 DP(NETIF_MSG_HW, "start part1\n");
4960 /* Disable inputs of parser neighbor blocks */
4961 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4962 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4963 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4964 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4966 /* Write 0 to parser credits for CFC search request */
4967 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4969 /* send Ethernet packet */
4972 /* TODO: do we need to reset the NIG statistics here? */
4973 /* Wait until NIG register shows 1 packet of size 0x10 */
4974 count = 1000 * factor;
4977 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4978 val = *bnx2x_sp(bp, wb_data[0]);
4986 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4990 /* Wait until PRS register shows 1 packet */
4991 count = 1000 * factor;
4993 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5001 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5005 /* Reset and init BRB, PRS */
5006 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5008 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5010 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5011 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5013 DP(NETIF_MSG_HW, "part2\n");
5015 /* Disable inputs of parser neighbor blocks */
5016 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5017 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5018 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5019 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5021 /* Write 0 to parser credits for CFC search request */
5022 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5024 /* send 10 Ethernet packets */
5025 for (i = 0; i < 10; i++)
5028 /* Wait until NIG register shows 10 + 1
5029 packets of size 11*0x10 = 0xb0 */
5030 count = 1000 * factor;
5033 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5034 val = *bnx2x_sp(bp, wb_data[0]);
5042 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5046 /* Wait until PRS register shows 2 packets */
5047 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5048 if (val != 2)
5049 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5051 /* Write 1 to parser credits for CFC search request */
5052 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5054 /* Wait until PRS register shows 3 packets */
5055 msleep(10 * factor);
5056 /* Wait until NIG register shows 1 packet of size 0x10 */
5057 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5058 if (val != 3)
5059 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5061 /* clear NIG EOP FIFO */
5062 for (i = 0; i < 11; i++)
5063 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5064 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5065 if (val != 1)
5066 BNX2X_ERR("clear of NIG failed\n");
5070 /* Reset and init BRB, PRS, NIG */
5071 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5073 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5075 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5076 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5079 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5082 /* Enable inputs of parser neighbor blocks */
5083 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5084 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5085 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5086 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5088 DP(NETIF_MSG_HW, "done\n");
5093 static void enable_blocks_attention(struct bnx2x *bp)
5095 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5096 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5097 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5098 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5099 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5100 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5101 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5102 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5103 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5104 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5105 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5106 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5107 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5108 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5109 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5110 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5111 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5112 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5113 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5114 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5115 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5116 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5117 if (CHIP_REV_IS_FPGA(bp))
5118 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5120 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5121 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5122 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5123 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5124 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5125 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5126 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5127 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5128 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5129 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5133 static int bnx2x_init_common(struct bnx2x *bp)
5137 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5139 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5140 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5142 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5143 if (CHIP_IS_E1H(bp))
5144 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5146 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5148 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5150 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5151 if (CHIP_IS_E1(bp)) {
5152 /* enable HW interrupt from PXP on USDM overflow
5153 bit 16 on INT_MASK_0 */
5154 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5157 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5161 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5162 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5163 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5164 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5165 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5167 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5168 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5169 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5170 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5171 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5174 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5176 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5177 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5178 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5181 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5182 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5184 /* let the HW do its magic ... */
5185 msleep(100);
5186 /* finish PXP init */
5187 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5188 if (val != 1) {
5189 BNX2X_ERR("PXP2 CFG failed\n");
5190 return -EBUSY;
5191 }
5192 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5193 if (val != 1) {
5194 BNX2X_ERR("PXP2 RD_INIT failed\n");
5195 return -EBUSY;
5196 }
5198 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5199 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5201 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5203 /* clean the DMAE memory */
5205 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5207 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5208 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5209 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5210 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5212 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5213 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5214 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5215 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5217 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5218 /* soft reset pulse */
5219 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5220 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5223 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5226 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5227 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5228 if (!CHIP_REV_IS_SLOW(bp)) {
5229 /* enable hw interrupt from doorbell Q */
5230 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5233 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5234 if (CHIP_REV_IS_SLOW(bp)) {
5235 /* fix for emulation and FPGA for no pause */
5236 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5237 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5238 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5239 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5242 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5244 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5245 if (CHIP_IS_E1H(bp))
5246 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5248 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5249 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5250 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5251 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5253 if (CHIP_IS_E1H(bp)) {
5254 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5255 STORM_INTMEM_SIZE_E1H/2);
5257 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5258 0, STORM_INTMEM_SIZE_E1H/2);
5259 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5260 STORM_INTMEM_SIZE_E1H/2);
5262 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5263 0, STORM_INTMEM_SIZE_E1H/2);
5264 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5265 STORM_INTMEM_SIZE_E1H/2);
5267 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5268 0, STORM_INTMEM_SIZE_E1H/2);
5269 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5270 STORM_INTMEM_SIZE_E1H/2);
5272 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5273 0, STORM_INTMEM_SIZE_E1H/2);
5275 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5276 STORM_INTMEM_SIZE_E1);
5277 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5278 STORM_INTMEM_SIZE_E1);
5279 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5280 STORM_INTMEM_SIZE_E1);
5281 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5282 STORM_INTMEM_SIZE_E1);
5285 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5286 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5287 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5288 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5291 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5293 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5296 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5297 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5298 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5300 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5301 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5302 REG_WR(bp, i, 0xc0cac01a);
5303 /* TODO: replace with something meaningful */
5305 if (CHIP_IS_E1H(bp))
5306 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5307 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5309 if (sizeof(union cdu_context) != 1024)
5310 /* we currently assume that a context is 1024 bytes */
5311 printk(KERN_ALERT PFX "please adjust the size of"
5312 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5314 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5315 val = (4 << 24) + (0 << 12) + 1024;
5316 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5317 if (CHIP_IS_E1(bp)) {
5318 /* !!! fix pxp client credit until excel update */
5319 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5320 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5323 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5324 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5326 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5327 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5329 /* PXPCS COMMON comes here */
5330 /* Reset PCIE errors for debug */
5331 REG_WR(bp, 0x2814, 0xffffffff);
5332 REG_WR(bp, 0x3820, 0xffffffff);
5334 /* EMAC0 COMMON comes here */
5335 /* EMAC1 COMMON comes here */
5336 /* DBU COMMON comes here */
5337 /* DBG COMMON comes here */
5339 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5340 if (CHIP_IS_E1H(bp)) {
5341 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5342 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5345 if (CHIP_REV_IS_SLOW(bp))
5348 /* finish CFC init */
5349 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5350 if (val != 1) {
5351 BNX2X_ERR("CFC LL_INIT failed\n");
5352 return -EBUSY;
5353 }
5354 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5355 if (val != 1) {
5356 BNX2X_ERR("CFC AC_INIT failed\n");
5357 return -EBUSY;
5358 }
5359 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5360 if (val != 1) {
5361 BNX2X_ERR("CFC CAM_INIT failed\n");
5362 return -EBUSY;
5363 }
5364 REG_WR(bp, CFC_REG_DEBUG0, 0);
5366 /* read the NIG statistic
5367 to see if this is our first load since power-up */
5368 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5369 val = *bnx2x_sp(bp, wb_data[0]);
5371 /* do internal memory self test */
5372 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5373 BNX2X_ERR("internal mem self test failed\n");
5377 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5378 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5379 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5380 /* Fan failure is indicated by SPIO 5 */
5381 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5382 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5384 /* set to active low mode */
5385 val = REG_RD(bp, MISC_REG_SPIO_INT);
5386 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5387 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5388 REG_WR(bp, MISC_REG_SPIO_INT, val);
5390 /* enable interrupt to signal the IGU */
5391 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5392 val |= (1 << MISC_REGISTERS_SPIO_5);
5393 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5400 /* clear PXP2 attentions */
5401 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5403 enable_blocks_attention(bp);
5405 if (!BP_NOMCP(bp)) {
5406 bnx2x_acquire_phy_lock(bp);
5407 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5408 bnx2x_release_phy_lock(bp);
5410 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5415 static int bnx2x_init_port(struct bnx2x *bp)
5417 int port = BP_PORT(bp);
5420 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5422 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5424 /* Port PXP comes here */
5425 /* Port PXP2 comes here */
5430 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5431 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5432 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5433 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5438 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5439 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5440 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5441 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5446 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5447 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5448 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5449 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5451 /* Port CMs come here */
5453 /* Port QM comes here */
5455 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5456 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5458 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5459 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5461 /* Port DQ comes here */
5462 /* Port BRB1 comes here */
5463 /* Port PRS comes here */
5464 /* Port TSDM comes here */
5465 /* Port CSDM comes here */
5466 /* Port USDM comes here */
5467 /* Port XSDM comes here */
5468 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5469 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5470 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5471 port ? USEM_PORT1_END : USEM_PORT0_END);
5472 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5473 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5474 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5475 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5476 /* Port UPB comes here */
5477 /* Port XPB comes here */
5479 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5480 port ? PBF_PORT1_END : PBF_PORT0_END);
5482 /* configure PBF to work without PAUSE mtu 9000 */
5483 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5485 /* update threshold */
5486 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5487 /* update init credit */
5488 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5491 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5493 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5496 /* tell the searcher where the T2 table is */
5497 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5499 wb_write[0] = U64_LO(bp->t2_mapping);
5500 wb_write[1] = U64_HI(bp->t2_mapping);
5501 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5502 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5503 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5504 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5506 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5507 /* Port SRCH comes here */
5509 /* Port CDU comes here */
5510 /* Port CFC comes here */
5512 if (CHIP_IS_E1(bp)) {
5513 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5514 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5516 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5517 port ? HC_PORT1_END : HC_PORT0_END);
5519 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5520 MISC_AEU_PORT0_START,
5521 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5522 /* init aeu_mask_attn_func_0/1:
5523 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5524 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5525 * bits 4-7 are used for "per vn group attention" */
5526 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5527 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5529 /* Port PXPCS comes here */
5530 /* Port EMAC0 comes here */
5531 /* Port EMAC1 comes here */
5532 /* Port DBU comes here */
5533 /* Port DBG comes here */
5534 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5535 port ? NIG_PORT1_END : NIG_PORT0_END);
5537 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5539 if (CHIP_IS_E1H(bp)) {
5541 struct cmng_struct_per_port m_cmng_port;
5544 /* 0x2 disable e1hov, 0x1 enable */
5545 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5546 (IS_E1HMF(bp) ? 0x1 : 0x2));
5548 /* Init RATE SHAPING and FAIRNESS contexts.
5549 Initialize as if there is 10G link. */
5550 wsum = bnx2x_calc_vn_wsum(bp);
5551 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5553 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5554 bnx2x_init_vn_minmax(bp, 2*vn + port,
5555 wsum, 10000, &m_cmng_port);
5558 /* Port MCP comes here */
5559 /* Port DMAE comes here */
5561 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5562 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5563 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5564 /* add SPIO 5 to group 0 */
5565 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5566 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5567 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5574 bnx2x__link_reset(bp);
5579 #define ILT_PER_FUNC (768/2)
5580 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5581 /* the phys address is shifted right 12 bits and a 1=valid bit is
5582 added as the 53rd bit;
5583 then, since this is a wide register(TM),
5584 we split it into two 32-bit writes
5585 */
5586 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5587 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5588 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5589 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5591 #define CNIC_ILT_LINES 0
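/* Worked example (illustrative values only): for a DMA address
 * addr = 0x0000001234567000, ONCHIP_ADDR1(addr) = (addr >> 12) =
 * 0x01234567 and ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) =
 * 0x00100000 - the low word carries the page-aligned address, the
 * high word carries the valid bit.
 */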
5593 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5597 if (CHIP_IS_E1H(bp))
5598 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5600 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5602 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5605 static int bnx2x_init_func(struct bnx2x *bp)
5607 int port = BP_PORT(bp);
5608 int func = BP_FUNC(bp);
5611 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5613 i = FUNC_ILT_BASE(func);
5615 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5616 if (CHIP_IS_E1H(bp)) {
5617 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5618 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5620 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5621 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5624 if (CHIP_IS_E1H(bp)) {
5625 for (i = 0; i < 9; i++)
5626 bnx2x_init_block(bp,
5627 cm_start[func][i], cm_end[func][i]);
5629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5630 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5633 /* HC init per function */
5634 if (CHIP_IS_E1H(bp)) {
5635 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5637 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5638 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5640 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5642 if (CHIP_IS_E1H(bp))
5643 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5645 /* Reset PCIE errors for debug */
5646 REG_WR(bp, 0x2114, 0xffffffff);
5647 REG_WR(bp, 0x2120, 0xffffffff);
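/* HW init is staged by the MCP load code: the first function on the
 * chip runs the COMMON stage, the first on a port adds the PORT
 * stage, and every function runs the FUNCTION stage (the switch
 * below falls through accordingly).
 */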
5652 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5656 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5657 BP_FUNC(bp), load_code);
5660 mutex_init(&bp->dmae_mutex);
5661 bnx2x_gunzip_init(bp);
5663 switch (load_code) {
5664 case FW_MSG_CODE_DRV_LOAD_COMMON:
5665 rc = bnx2x_init_common(bp);
5670 case FW_MSG_CODE_DRV_LOAD_PORT:
5672 rc = bnx2x_init_port(bp);
5677 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5679 rc = bnx2x_init_func(bp);
5685 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5689 if (!BP_NOMCP(bp)) {
5690 int func = BP_FUNC(bp);
5692 bp->fw_drv_pulse_wr_seq =
5693 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5694 DRV_PULSE_SEQ_MASK);
5695 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5696 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5697 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5701 /* this needs to be done before gunzip end */
5702 bnx2x_zero_def_sb(bp);
5703 for_each_queue(bp, i)
5704 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5707 bnx2x_gunzip_end(bp);
5712 /* send the MCP a request, block until there is a reply */
5713 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5715 int func = BP_FUNC(bp);
5716 u32 seq = ++bp->fw_seq;
5719 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5721 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5722 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5725 /* let the FW do its magic ... */
5726 do {
5727 msleep(delay);
5728 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5730 /* Give the FW up to 2 seconds (200*10ms) */
5731 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5733 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5734 cnt*delay, rc, seq);
5736 /* is this a reply to our command? */
5737 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5738 rc &= FW_MSG_CODE_MASK;
5742 BNX2X_ERR("FW failed to respond!\n");
5750 static void bnx2x_free_mem(struct bnx2x *bp)
5753 #define BNX2X_PCI_FREE(x, y, size) \
5756 pci_free_consistent(bp->pdev, size, x, y); \
5762 #define BNX2X_FREE(x) \
5773 for_each_queue(bp, i) {
5776 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5777 bnx2x_fp(bp, i, status_blk_mapping),
5778 sizeof(struct host_status_block) +
5779 sizeof(struct eth_tx_db_data));
5781 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5782 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5783 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5784 bnx2x_fp(bp, i, tx_desc_mapping),
5785 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5787 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5788 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5789 bnx2x_fp(bp, i, rx_desc_mapping),
5790 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5792 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5793 bnx2x_fp(bp, i, rx_comp_mapping),
5794 sizeof(struct eth_fast_path_rx_cqe) *
5798 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5799 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5800 bnx2x_fp(bp, i, rx_sge_mapping),
5801 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5803 /* end of fastpath */
5805 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5806 sizeof(struct host_def_status_block));
5808 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5809 sizeof(struct bnx2x_slowpath));
5812 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5813 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5814 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5815 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5817 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5819 #undef BNX2X_PCI_FREE
5823 static int bnx2x_alloc_mem(struct bnx2x *bp)
5826 #define BNX2X_PCI_ALLOC(x, y, size) \
5828 x = pci_alloc_consistent(bp->pdev, size, y); \
5830 goto alloc_mem_err; \
5831 memset(x, 0, size); \
5834 #define BNX2X_ALLOC(x, size) \
5836 x = vmalloc(size); \
5838 goto alloc_mem_err; \
5839 memset(x, 0, size); \
5845 for_each_queue(bp, i) {
5846 bnx2x_fp(bp, i, bp) = bp;
5849 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5850 &bnx2x_fp(bp, i, status_blk_mapping),
5851 sizeof(struct host_status_block) +
5852 sizeof(struct eth_tx_db_data));
5854 bnx2x_fp(bp, i, hw_tx_prods) =
5855 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5857 bnx2x_fp(bp, i, tx_prods_mapping) =
5858 bnx2x_fp(bp, i, status_blk_mapping) +
5859 sizeof(struct host_status_block);
5861 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5862 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5863 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5864 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5865 &bnx2x_fp(bp, i, tx_desc_mapping),
5866 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5868 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5869 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5870 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5871 &bnx2x_fp(bp, i, rx_desc_mapping),
5872 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5874 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5875 &bnx2x_fp(bp, i, rx_comp_mapping),
5876 sizeof(struct eth_fast_path_rx_cqe) *
5880 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5881 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5882 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5883 &bnx2x_fp(bp, i, rx_sge_mapping),
5884 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5886 /* end of fastpath */
5888 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5889 sizeof(struct host_def_status_block));
5891 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5892 sizeof(struct bnx2x_slowpath));
5895 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5898 for (i = 0; i < 64*1024; i += 64) {
5899 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5900 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5903 /* allocate searcher T2 table
5904 we allocate 1/4 of alloc num for T2
5905 (which is not entered into the ILT) */
5906 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5909 for (i = 0; i < 16*1024; i += 64)
5910 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5912 /* now fixup the last line in the block to point to the next block */
5913 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
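/* The T2 table is consumed as a linked free list: the last 8 bytes
 * of each 64-byte element hold the physical address of the next
 * element, and the line above makes the final element wrap back to
 * the start of the block.
 */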
5915 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5916 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5918 /* QM queues (128*MAX_CONN) */
5919 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5922 /* Slow path ring */
5923 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5931 #undef BNX2X_PCI_ALLOC
5935 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5939 for_each_queue(bp, i) {
5940 struct bnx2x_fastpath *fp = &bp->fp[i];
5942 u16 bd_cons = fp->tx_bd_cons;
5943 u16 sw_prod = fp->tx_pkt_prod;
5944 u16 sw_cons = fp->tx_pkt_cons;
5946 while (sw_cons != sw_prod) {
5947 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5953 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5957 for_each_queue(bp, j) {
5958 struct bnx2x_fastpath *fp = &bp->fp[j];
5960 for (i = 0; i < NUM_RX_BD; i++) {
5961 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5962 struct sk_buff *skb = rx_buf->skb;
5967 pci_unmap_single(bp->pdev,
5968 pci_unmap_addr(rx_buf, mapping),
5970 PCI_DMA_FROMDEVICE);
5975 if (!fp->disable_tpa)
5976 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5977 ETH_MAX_AGGREGATION_QUEUES_E1 :
5978 ETH_MAX_AGGREGATION_QUEUES_E1H);
5982 static void bnx2x_free_skbs(struct bnx2x *bp)
5984 bnx2x_free_tx_skbs(bp);
5985 bnx2x_free_rx_skbs(bp);
5988 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5992 free_irq(bp->msix_table[0].vector, bp->dev);
5993 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5994 bp->msix_table[0].vector);
5996 for_each_queue(bp, i) {
5997 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5998 "state %x\n", i, bp->msix_table[i + offset].vector,
5999 bnx2x_fp(bp, i, state));
6001 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6002 BNX2X_ERR("IRQ of fp #%d being freed while "
6003 "state != closed\n", i);
6005 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6009 static void bnx2x_free_irq(struct bnx2x *bp)
6011 if (bp->flags & USING_MSIX_FLAG) {
6012 bnx2x_free_msix_irqs(bp);
6013 pci_disable_msix(bp->pdev);
6014 bp->flags &= ~USING_MSIX_FLAG;
6017 free_irq(bp->pdev->irq, bp->dev);
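/* MSI-X layout: table entry 0 is the slowpath (default status block)
 * vector; entries 1..num_queues are the fastpath vectors, programmed
 * with IGU vector numbers offset by BP_L_ID().
 */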
6020 static int bnx2x_enable_msix(struct bnx2x *bp)
6024 bp->msix_table[0].entry = 0;
6026 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6028 for_each_queue(bp, i) {
6029 int igu_vec = offset + i + BP_L_ID(bp);
6031 bp->msix_table[i + offset].entry = igu_vec;
6032 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6033 "(fastpath #%u)\n", i + offset, igu_vec, i);
6036 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6037 bp->num_queues + offset);
6039 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6042 bp->flags |= USING_MSIX_FLAG;
6047 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6049 int i, rc, offset = 1;
6051 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6052 bp->dev->name, bp->dev);
6054 BNX2X_ERR("request sp irq failed\n");
6058 for_each_queue(bp, i) {
6059 rc = request_irq(bp->msix_table[i + offset].vector,
6060 bnx2x_msix_fp_int, 0,
6061 bp->dev->name, &bp->fp[i]);
6063 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6065 bnx2x_free_msix_irqs(bp);
6069 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6075 static int bnx2x_req_irq(struct bnx2x *bp)
6079 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6080 bp->dev->name, bp->dev);
6082 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6087 static void bnx2x_napi_enable(struct bnx2x *bp)
6091 for_each_queue(bp, i)
6092 napi_enable(&bnx2x_fp(bp, i, napi));
6095 static void bnx2x_napi_disable(struct bnx2x *bp)
6099 for_each_queue(bp, i)
6100 napi_disable(&bnx2x_fp(bp, i, napi));
6103 static void bnx2x_netif_start(struct bnx2x *bp)
6105 if (atomic_dec_and_test(&bp->intr_sem)) {
6106 if (netif_running(bp->dev)) {
6107 if (bp->state == BNX2X_STATE_OPEN)
6108 netif_wake_queue(bp->dev);
6109 bnx2x_napi_enable(bp);
6110 bnx2x_int_enable(bp);
6115 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6117 bnx2x_int_disable_sync(bp, disable_hw);
6118 if (netif_running(bp->dev)) {
6119 bnx2x_napi_disable(bp);
6120 netif_tx_disable(bp->dev);
6121 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6126 * Init service functions
6129 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6131 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6132 int port = BP_PORT(bp);
6135 * unicasts 0-31:port0 32-63:port1
6136 * multicast 64-127:port0 128-191:port1
6138 config->hdr.length_6b = 2;
6139 config->hdr.offset = port ? 31 : 0;
6140 config->hdr.client_id = BP_CL_ID(bp);
6141 config->hdr.reserved1 = 0;
6144 config->config_table[0].cam_entry.msb_mac_addr =
6145 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6146 config->config_table[0].cam_entry.middle_mac_addr =
6147 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6148 config->config_table[0].cam_entry.lsb_mac_addr =
6149 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6150 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6152 config->config_table[0].target_table_entry.flags = 0;
6154 CAM_INVALIDATE(config->config_table[0]);
6155 config->config_table[0].target_table_entry.client_id = 0;
6156 config->config_table[0].target_table_entry.vlan_id = 0;
6158 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6159 (set ? "setting" : "clearing"),
6160 config->config_table[0].cam_entry.msb_mac_addr,
6161 config->config_table[0].cam_entry.middle_mac_addr,
6162 config->config_table[0].cam_entry.lsb_mac_addr);
6165 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6166 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6167 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6168 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6170 config->config_table[1].target_table_entry.flags =
6171 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6173 CAM_INVALIDATE(config->config_table[1]);
6174 config->config_table[1].target_table_entry.client_id = 0;
6175 config->config_table[1].target_table_entry.vlan_id = 0;
6177 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6178 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6179 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
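/* With set == 0 the same ramrod is used to invalidate the CAM
 * entries (CAM_INVALIDATE above) instead of programming them, e.g.
 * when the MAC address is cleared on unload.
 */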
6182 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6184 struct mac_configuration_cmd_e1h *config =
6185 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6187 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6188 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6192 /* CAM allocation for E1H
6193 * unicasts: by func number
6194 * multicast: 20+FUNC*20, 20 each
6196 config->hdr.length_6b = 1;
6197 config->hdr.offset = BP_FUNC(bp);
6198 config->hdr.client_id = BP_CL_ID(bp);
6199 config->hdr.reserved1 = 0;
6202 config->config_table[0].msb_mac_addr =
6203 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6204 config->config_table[0].middle_mac_addr =
6205 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6206 config->config_table[0].lsb_mac_addr =
6207 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6208 config->config_table[0].client_id = BP_L_ID(bp);
6209 config->config_table[0].vlan_id = 0;
6210 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6212 config->config_table[0].flags = BP_PORT(bp);
6214 config->config_table[0].flags =
6215 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6217 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6218 (set ? "setting" : "clearing"),
6219 config->config_table[0].msb_mac_addr,
6220 config->config_table[0].middle_mac_addr,
6221 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6223 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6224 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6225 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6228 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6229 int *state_p, int poll)
6231 /* can take a while if any port is running */
6234 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6235 poll ? "polling" : "waiting", state, idx);
6240 bnx2x_rx_int(bp->fp, 10);
6241 /* if index is different from 0
6242 * the reply for some commands will
6243 * be on the non default queue
6244 */
6245 if (idx)
6246 bnx2x_rx_int(&bp->fp[idx], 10);
6249 mb(); /* state is changed by bnx2x_sp_event() */
6250 if (*state_p == state)
6257 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6258 poll ? "polling" : "waiting", state, idx);
6259 #ifdef BNX2X_STOP_ON_ERROR
6266 static int bnx2x_setup_leading(struct bnx2x *bp)
6270 /* reset IGU state */
6271 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6274 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6276 /* Wait for completion */
6277 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6282 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6284 /* reset IGU state */
6285 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6288 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6289 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6291 /* Wait for completion */
6292 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6293 &(bp->fp[index].state), 0);
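/* Client setup: the leading queue is brought up with a PORT_SETUP
 * ramrod and the additional RSS queues with CLIENT_SETUP ramrods;
 * both paths then poll for the state change via bnx2x_wait_ramrod().
 */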
6296 static int bnx2x_poll(struct napi_struct *napi, int budget);
6297 static void bnx2x_set_rx_mode(struct net_device *dev);
6299 /* must be called with rtnl_lock */
6300 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6304 #ifdef BNX2X_STOP_ON_ERROR
6305 if (unlikely(bp->panic))
6309 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6311 /* Send LOAD_REQUEST command to MCP
6312 Returns the type of LOAD command:
6313 if it is the first port to be initialized
6314 common blocks should be initialized, otherwise - not
6316 if (!BP_NOMCP(bp)) {
6317 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6319 BNX2X_ERR("MCP response failure, aborting\n");
6322 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6323 return -EBUSY; /* other port in diagnostic mode */
6326 int port = BP_PORT(bp);
6328 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6329 load_count[0], load_count[1], load_count[2]);
6331 load_count[1 + port]++;
6332 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6333 load_count[0], load_count[1], load_count[2]);
6334 if (load_count[0] == 1)
6335 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6336 else if (load_count[1 + port] == 1)
6337 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6339 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
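/* Worked example of the NO-MCP arbitration above: load_count[] starts at
 * {0, 0, 0}. The first function up on port 0 makes it {1, 1, 0} and, since
 * load_count[0] == 1, receives LOAD_COMMON; a later function on port 1
 * gives {2, 1, 1} and receives LOAD_PORT; every function after that
 * receives LOAD_FUNCTION. bnx2x_nic_unload() decrements the same counters
 * to choose the matching UNLOAD_* scope.
 */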
6342 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6343 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6347 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6349 /* if we can't use MSI-X we only need one fastpath,
6350 * so try to enable MSI-X with the requested number of fastpaths
6351 * and fall back to INT#A with a single fastpath
6357 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6358 /* user requested number */
6359 bp->num_queues = use_multi;
6362 bp->num_queues = min_t(u32, num_online_cpus(),
6367 if (bnx2x_enable_msix(bp)) {
6368 /* failed to enable MSI-X */
6371 BNX2X_ERR("Multi requested but failed"
6372 " to enable MSI-X\n");
6376 "set number of queues to %d\n", bp->num_queues);
6378 if (bnx2x_alloc_mem(bp))
6381 for_each_queue(bp, i)
6382 bnx2x_fp(bp, i, disable_tpa) =
6383 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6385 if (bp->flags & USING_MSIX_FLAG) {
6386 rc = bnx2x_req_msix_irqs(bp);
6388 pci_disable_msix(bp->pdev);
6393 rc = bnx2x_req_irq(bp);
6395 BNX2X_ERR("IRQ request failed, aborting\n");
6400 for_each_queue(bp, i)
6401 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6405 rc = bnx2x_init_hw(bp, load_code);
6407 BNX2X_ERR("HW init failed, aborting\n");
6408 goto load_int_disable;
6411 /* Setup NIC internals and enable interrupts */
6412 bnx2x_nic_init(bp, load_code);
6414 /* Send LOAD_DONE command to MCP */
6415 if (!BP_NOMCP(bp)) {
6416 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6418 BNX2X_ERR("MCP response failure, aborting\n");
6420 goto load_rings_free;
6424 bnx2x_stats_init(bp);
6426 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6428 /* Enable Rx interrupt handling before sending the ramrod,
6429 as its completion arrives on the Rx FP queue */
6430 bnx2x_napi_enable(bp);
6432 /* Enable interrupt handling */
6433 atomic_set(&bp->intr_sem, 0);
6435 rc = bnx2x_setup_leading(bp);
6437 BNX2X_ERR("Setup leading failed!\n");
6438 goto load_netif_stop;
6441 if (CHIP_IS_E1H(bp))
6442 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6443 BNX2X_ERR("!!! mf_cfg function disabled\n");
6444 bp->state = BNX2X_STATE_DISABLED;
6447 if (bp->state == BNX2X_STATE_OPEN)
6448 for_each_nondefault_queue(bp, i) {
6449 rc = bnx2x_setup_multi(bp, i);
6451 goto load_netif_stop;
6455 bnx2x_set_mac_addr_e1(bp, 1);
6457 bnx2x_set_mac_addr_e1h(bp, 1);
6460 bnx2x_initial_phy_init(bp);
6462 /* Start fast path */
6463 switch (load_mode) {
6465 /* Tx queue should only be re-enabled */
6466 netif_wake_queue(bp->dev);
6467 bnx2x_set_rx_mode(bp->dev);
6471 netif_start_queue(bp->dev);
6472 bnx2x_set_rx_mode(bp->dev);
6473 if (bp->flags & USING_MSIX_FLAG)
6474 printk(KERN_INFO PFX "%s: using MSI-X\n",
6479 bnx2x_set_rx_mode(bp->dev);
6480 bp->state = BNX2X_STATE_DIAG;
6488 bnx2x__link_status_update(bp);
6490 /* start the timer */
6491 mod_timer(&bp->timer, jiffies + bp->current_interval);
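/* A successful load ends here, having walked the state machine
 * OPENING_WAIT4_LOAD -> OPENING_WAIT4_PORT -> OPEN (or DIAG). The code
 * below unwinds a partially completed load in reverse order when one of
 * the steps above fails.
 */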
6497 bnx2x_napi_disable(bp);
6499 /* Free SKBs, SGEs, TPA pool and driver internals */
6500 bnx2x_free_skbs(bp);
6501 for_each_queue(bp, i)
6502 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6504 bnx2x_int_disable_sync(bp, 1);
6511 /* TBD we really need to reset the chip
6512 if we want to recover from this */
6516 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6520 /* halt the connection */
6521 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6522 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6524 /* Wait for completion */
6525 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6526 &(bp->fp[index].state), 1);
6527 if (rc) /* timeout */
6530 /* delete cfc entry */
6531 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6533 /* Wait for completion */
6534 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6535 &(bp->fp[index].state), 1);
6539 static int bnx2x_stop_leading(struct bnx2x *bp)
6541 u16 dsb_sp_prod_idx;
6542 /* if the other port is handling traffic,
6543 this can take a lot of time */
6549 /* Send HALT ramrod */
6550 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6551 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6553 /* Wait for completion */
6554 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6555 &(bp->fp[0].state), 1);
6556 if (rc) /* timeout */
6559 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6561 /* Send PORT_DELETE ramrod */
6562 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6564 /* Wait for the completion to arrive on the default status block.
6565 We are going to reset the chip anyway,
6566 so there is not much to do if this times out
6568 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6570 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6571 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6572 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6573 #ifdef BNX2X_STOP_ON_ERROR
6583 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6584 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
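/* Note that, unlike the HALT ramrod above, PORT_DEL completion is detected
 * by watching the default status block producer (sampled into
 * dsb_sp_prod_idx before posting) rather than a per-fastpath state field.
 */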
6589 static void bnx2x_reset_func(struct bnx2x *bp)
6591 int port = BP_PORT(bp);
6592 int func = BP_FUNC(bp);
6596 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6597 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6599 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6602 base = FUNC_ILT_BASE(func);
6603 for (i = base; i < base + ILT_PER_FUNC; i++)
6604 bnx2x_ilt_wr(bp, i, 0);
6607 static void bnx2x_reset_port(struct bnx2x *bp)
6609 int port = BP_PORT(bp);
6612 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6614 /* Do not rcv packets to BRB */
6615 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6616 /* Do not direct rcv packets that are not for MCP to the BRB */
6617 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6618 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6621 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6624 /* Check for BRB port occupancy */
6625 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6627 DP(NETIF_MSG_IFDOWN,
6628 "BRB1 is not empty %d blocks are occupied\n", val);
6630 /* TODO: Close Doorbell port? */
6633 static void bnx2x_reset_common(struct bnx2x *bp)
6636 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6638 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6641 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6643 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6644 BP_FUNC(bp), reset_code);
6646 switch (reset_code) {
6647 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6648 bnx2x_reset_port(bp);
6649 bnx2x_reset_func(bp);
6650 bnx2x_reset_common(bp);
6653 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6654 bnx2x_reset_port(bp);
6655 bnx2x_reset_func(bp);
6658 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6659 bnx2x_reset_func(bp);
6663 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
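/* The reset scope widens with the unload code: UNLOAD_FUNCTION resets only
 * this function, UNLOAD_PORT additionally resets the port blocks, and
 * UNLOAD_COMMON (used by the last function to unload) resets the common
 * blocks as well.
 */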
6668 /* must be called with rtnl_lock */
6669 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6671 int port = BP_PORT(bp);
6675 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6677 bp->rx_mode = BNX2X_RX_MODE_NONE;
6678 bnx2x_set_storm_rx_mode(bp);
6680 bnx2x_netif_stop(bp, 1);
6681 if (!netif_running(bp->dev))
6682 bnx2x_napi_disable(bp);
6683 del_timer_sync(&bp->timer);
6684 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6685 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6686 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6688 /* Wait until all Tx fastpath tasks complete */
6689 for_each_queue(bp, i) {
6690 struct bnx2x_fastpath *fp = &bp->fp[i];
6694 while (BNX2X_HAS_TX_WORK(fp)) {
6696 bnx2x_tx_int(fp, 1000);
6698 BNX2X_ERR("timeout waiting for queue[%d]\n",
6700 #ifdef BNX2X_STOP_ON_ERROR
6712 /* Give HW time to discard old tx messages */
6718 if (CHIP_IS_E1(bp)) {
6719 struct mac_configuration_cmd *config =
6720 bnx2x_sp(bp, mcast_config);
6722 bnx2x_set_mac_addr_e1(bp, 0);
6724 for (i = 0; i < config->hdr.length_6b; i++)
6725 CAM_INVALIDATE(config->config_table[i]);
6727 config->hdr.length_6b = i;
6728 if (CHIP_REV_IS_SLOW(bp))
6729 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6731 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6732 config->hdr.client_id = BP_CL_ID(bp);
6733 config->hdr.reserved1 = 0;
6735 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6736 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6737 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6740 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6742 bnx2x_set_mac_addr_e1h(bp, 0);
6744 for (i = 0; i < MC_HASH_SIZE; i++)
6745 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6748 if (unload_mode == UNLOAD_NORMAL)
6749 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6751 else if (bp->flags & NO_WOL_FLAG) {
6752 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6753 if (CHIP_IS_E1H(bp))
6754 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6756 } else if (bp->wol) {
6757 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6758 u8 *mac_addr = bp->dev->dev_addr;
6760 /* The mac address is written to entries 1-4 to
6761 preserve entry 0 which is used by the PMF */
6762 u8 entry = (BP_E1HVN(bp) + 1)*8;
6764 val = (mac_addr[0] << 8) | mac_addr[1];
6765 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6767 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6768 (mac_addr[4] << 8) | mac_addr[5];
6769 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
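/* Illustrative example (assuming the address 00:11:22:33:44:55): the two
 * MAC_MATCH words written above are built most-significant byte first,
 *
 *   entry     word: (0x00 << 8) | 0x11                               -> 0x00000011
 *   entry + 4 word: (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 -> 0x22334455
 */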
6771 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6774 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6776 /* Close the multi and leading connections.
6777 Completions for these ramrods are collected synchronously */
6778 for_each_nondefault_queue(bp, i)
6779 if (bnx2x_stop_multi(bp, i))
6782 rc = bnx2x_stop_leading(bp);
6784 BNX2X_ERR("Stop leading failed!\n");
6785 #ifdef BNX2X_STOP_ON_ERROR
6794 reset_code = bnx2x_fw_command(bp, reset_code);
6796 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6797 load_count[0], load_count[1], load_count[2]);
6799 load_count[1 + port]--;
6800 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6801 load_count[0], load_count[1], load_count[2]);
6802 if (load_count[0] == 0)
6803 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6804 else if (load_count[1 + port] == 0)
6805 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6807 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6810 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6811 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6812 bnx2x__link_reset(bp);
6814 /* Reset the chip */
6815 bnx2x_reset_chip(bp, reset_code);
6817 /* Report UNLOAD_DONE to MCP */
6819 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6822 /* Free SKBs, SGEs, TPA pool and driver internals */
6823 bnx2x_free_skbs(bp);
6824 for_each_queue(bp, i)
6825 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6828 bp->state = BNX2X_STATE_CLOSED;
6830 netif_carrier_off(bp->dev);
6835 static void bnx2x_reset_task(struct work_struct *work)
6837 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6839 #ifdef BNX2X_STOP_ON_ERROR
6840 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6841 " so reset not done to allow debug dump,\n"
6842 KERN_ERR " you will need to reboot when done\n");
6848 if (!netif_running(bp->dev))
6849 goto reset_task_exit;
6851 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6852 bnx2x_nic_load(bp, LOAD_NORMAL);
6858 /* end of nic load/unload */
6863 * Init service functions
6866 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6870 /* Check if there is any driver already loaded */
6871 val = REG_RD(bp, MISC_REG_UNPREPARED);
6873 /* Check if it is the UNDI driver:
6874 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
6876 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6877 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6879 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6880 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6883 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6885 int func = BP_FUNC(bp);
6889 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6891 /* try unload UNDI on port 0 */
6894 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6895 DRV_MSG_SEQ_NUMBER_MASK);
6896 reset_code = bnx2x_fw_command(bp, reset_code);
6898 /* if UNDI is loaded on the other port */
6899 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6901 /* send "DONE" for previous unload */
6902 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6904 /* unload UNDI on port 1 */
6907 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6908 DRV_MSG_SEQ_NUMBER_MASK);
6909 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6911 bnx2x_fw_command(bp, reset_code);
6914 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6915 HC_REG_CONFIG_0), 0x1000);
6917 /* close input traffic and wait for it */
6918 /* Do not rcv packets to BRB */
6920 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6921 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6922 /* Do not direct rcv packets that are not for MCP to
6925 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6926 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6929 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6930 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6933 /* save NIG port swap info */
6934 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6935 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6938 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6941 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6943 /* take the NIG out of reset and restore swap values */
6945 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6946 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6947 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6948 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6950 /* send unload done to the MCP */
6951 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6953 /* restore our func and fw_seq */
6956 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6957 DRV_MSG_SEQ_NUMBER_MASK);
6962 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6964 u32 val, val2, val3, val4, id;
6967 /* Get the chip revision id and number. */
6968 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6969 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6970 id = ((val & 0xffff) << 16);
6971 val = REG_RD(bp, MISC_REG_CHIP_REV);
6972 id |= ((val & 0xf) << 12);
6973 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6974 id |= ((val & 0xff) << 4);
6975 REG_RD(bp, MISC_REG_BOND_ID);
6977 bp->common.chip_id = id;
6978 bp->link_params.chip_id = bp->common.chip_id;
6979 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
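/* Example of the composition above (chip num in bits 16-31, rev in 12-15,
 * metal in 4-11, bond_id in 0-3): chip num 0x164e with rev, metal and
 * bond_id all zero yields id == 0x164e0000.
 */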
6981 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6982 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6983 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6984 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6985 bp->common.flash_size, bp->common.flash_size);
6987 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6988 bp->link_params.shmem_base = bp->common.shmem_base;
6989 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6991 if (!bp->common.shmem_base ||
6992 (bp->common.shmem_base < 0xA0000) ||
6993 (bp->common.shmem_base >= 0xC0000)) {
6994 BNX2X_DEV_INFO("MCP not active\n");
6995 bp->flags |= NO_MCP_FLAG;
6999 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7000 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7001 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7002 BNX2X_ERR("BAD MCP validity signature\n");
7004 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7005 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7007 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7008 bp->common.hw_config, bp->common.board);
7010 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7011 SHARED_HW_CFG_LED_MODE_MASK) >>
7012 SHARED_HW_CFG_LED_MODE_SHIFT);
7014 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7015 bp->common.bc_ver = val;
7016 BNX2X_DEV_INFO("bc_ver %X\n", val);
7017 if (val < BNX2X_BC_VER) {
7018 /* for now only warn;
7019 * later we might need to enforce this */
7020 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7021 " please upgrade BC\n", BNX2X_BC_VER, val);
7024 if (BP_E1HVN(bp) == 0) {
7025 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7026 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7028 /* no WOL capability for E1HVN != 0 */
7029 bp->flags |= NO_WOL_FLAG;
7031 BNX2X_DEV_INFO("%sWoL capable\n",
7032 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7034 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7035 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7036 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7037 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7039 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7040 val, val2, val3, val4);
7043 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7046 int port = BP_PORT(bp);
7049 switch (switch_cfg) {
7051 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7054 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7055 switch (ext_phy_type) {
7056 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7057 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7060 bp->port.supported |= (SUPPORTED_10baseT_Half |
7061 SUPPORTED_10baseT_Full |
7062 SUPPORTED_100baseT_Half |
7063 SUPPORTED_100baseT_Full |
7064 SUPPORTED_1000baseT_Full |
7065 SUPPORTED_2500baseX_Full |
7070 SUPPORTED_Asym_Pause);
7073 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7074 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7077 bp->port.supported |= (SUPPORTED_10baseT_Half |
7078 SUPPORTED_10baseT_Full |
7079 SUPPORTED_100baseT_Half |
7080 SUPPORTED_100baseT_Full |
7081 SUPPORTED_1000baseT_Full |
7086 SUPPORTED_Asym_Pause);
7090 BNX2X_ERR("NVRAM config error. "
7091 "BAD SerDes ext_phy_config 0x%x\n",
7092 bp->link_params.ext_phy_config);
7096 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7098 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7101 case SWITCH_CFG_10G:
7102 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7105 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7106 switch (ext_phy_type) {
7107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7108 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7111 bp->port.supported |= (SUPPORTED_10baseT_Half |
7112 SUPPORTED_10baseT_Full |
7113 SUPPORTED_100baseT_Half |
7114 SUPPORTED_100baseT_Full |
7115 SUPPORTED_1000baseT_Full |
7116 SUPPORTED_2500baseX_Full |
7117 SUPPORTED_10000baseT_Full |
7122 SUPPORTED_Asym_Pause);
7125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7126 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7129 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7132 SUPPORTED_Asym_Pause);
7135 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7136 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7139 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7140 SUPPORTED_1000baseT_Full |
7143 SUPPORTED_Asym_Pause);
7146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7147 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7150 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7151 SUPPORTED_1000baseT_Full |
7155 SUPPORTED_Asym_Pause);
7158 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7159 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7162 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7163 SUPPORTED_2500baseX_Full |
7164 SUPPORTED_1000baseT_Full |
7168 SUPPORTED_Asym_Pause);
7171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7172 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7175 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7179 SUPPORTED_Asym_Pause);
7182 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7183 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7184 bp->link_params.ext_phy_config);
7188 BNX2X_ERR("NVRAM config error. "
7189 "BAD XGXS ext_phy_config 0x%x\n",
7190 bp->link_params.ext_phy_config);
7194 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7196 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7201 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7202 bp->port.link_config);
7205 bp->link_params.phy_addr = bp->port.phy_addr;
7207 /* mask what we support according to speed_cap_mask */
7208 if (!(bp->link_params.speed_cap_mask &
7209 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7210 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7212 if (!(bp->link_params.speed_cap_mask &
7213 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7214 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7216 if (!(bp->link_params.speed_cap_mask &
7217 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7218 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7220 if (!(bp->link_params.speed_cap_mask &
7221 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7222 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7224 if (!(bp->link_params.speed_cap_mask &
7225 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7226 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7227 SUPPORTED_1000baseT_Full);
7229 if (!(bp->link_params.speed_cap_mask &
7230 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7231 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7233 if (!(bp->link_params.speed_cap_mask &
7234 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7235 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7237 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7240 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7242 bp->link_params.req_duplex = DUPLEX_FULL;
7244 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7245 case PORT_FEATURE_LINK_SPEED_AUTO:
7246 if (bp->port.supported & SUPPORTED_Autoneg) {
7247 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7248 bp->port.advertising = bp->port.supported;
7251 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7253 if ((ext_phy_type ==
7254 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7256 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7257 /* force 10G, no AN */
7258 bp->link_params.req_line_speed = SPEED_10000;
7259 bp->port.advertising =
7260 (ADVERTISED_10000baseT_Full |
7264 BNX2X_ERR("NVRAM config error. "
7265 "Invalid link_config 0x%x"
7266 " Autoneg not supported\n",
7267 bp->port.link_config);
7272 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7273 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7274 bp->link_params.req_line_speed = SPEED_10;
7275 bp->port.advertising = (ADVERTISED_10baseT_Full |
7278 BNX2X_ERR("NVRAM config error. "
7279 "Invalid link_config 0x%x"
7280 " speed_cap_mask 0x%x\n",
7281 bp->port.link_config,
7282 bp->link_params.speed_cap_mask);
7287 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7288 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7289 bp->link_params.req_line_speed = SPEED_10;
7290 bp->link_params.req_duplex = DUPLEX_HALF;
7291 bp->port.advertising = (ADVERTISED_10baseT_Half |
7294 BNX2X_ERR("NVRAM config error. "
7295 "Invalid link_config 0x%x"
7296 " speed_cap_mask 0x%x\n",
7297 bp->port.link_config,
7298 bp->link_params.speed_cap_mask);
7303 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7304 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7305 bp->link_params.req_line_speed = SPEED_100;
7306 bp->port.advertising = (ADVERTISED_100baseT_Full |
7309 BNX2X_ERR("NVRAM config error. "
7310 "Invalid link_config 0x%x"
7311 " speed_cap_mask 0x%x\n",
7312 bp->port.link_config,
7313 bp->link_params.speed_cap_mask);
7318 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7319 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7320 bp->link_params.req_line_speed = SPEED_100;
7321 bp->link_params.req_duplex = DUPLEX_HALF;
7322 bp->port.advertising = (ADVERTISED_100baseT_Half |
7325 BNX2X_ERR("NVRAM config error. "
7326 "Invalid link_config 0x%x"
7327 " speed_cap_mask 0x%x\n",
7328 bp->port.link_config,
7329 bp->link_params.speed_cap_mask);
7334 case PORT_FEATURE_LINK_SPEED_1G:
7335 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7336 bp->link_params.req_line_speed = SPEED_1000;
7337 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7340 BNX2X_ERR("NVRAM config error. "
7341 "Invalid link_config 0x%x"
7342 " speed_cap_mask 0x%x\n",
7343 bp->port.link_config,
7344 bp->link_params.speed_cap_mask);
7349 case PORT_FEATURE_LINK_SPEED_2_5G:
7350 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7351 bp->link_params.req_line_speed = SPEED_2500;
7352 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7355 BNX2X_ERR("NVRAM config error. "
7356 "Invalid link_config 0x%x"
7357 " speed_cap_mask 0x%x\n",
7358 bp->port.link_config,
7359 bp->link_params.speed_cap_mask);
7364 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7365 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7366 case PORT_FEATURE_LINK_SPEED_10G_KR:
7367 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7368 bp->link_params.req_line_speed = SPEED_10000;
7369 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7372 BNX2X_ERR("NVRAM config error. "
7373 "Invalid link_config 0x%x"
7374 " speed_cap_mask 0x%x\n",
7375 bp->port.link_config,
7376 bp->link_params.speed_cap_mask);
7382 BNX2X_ERR("NVRAM config error. "
7383 "BAD link speed link_config 0x%x\n",
7384 bp->port.link_config);
7385 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7386 bp->port.advertising = bp->port.supported;
7390 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7391 PORT_FEATURE_FLOW_CONTROL_MASK);
7392 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7393 !(bp->port.supported & SUPPORTED_Autoneg))
7394 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7396 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7397 " advertising 0x%x\n",
7398 bp->link_params.req_line_speed,
7399 bp->link_params.req_duplex,
7400 bp->link_params.req_flow_ctrl, bp->port.advertising);
7403 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7405 int port = BP_PORT(bp);
7408 bp->link_params.bp = bp;
7409 bp->link_params.port = port;
7411 bp->link_params.serdes_config =
7412 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7413 bp->link_params.lane_config =
7414 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7415 bp->link_params.ext_phy_config =
7417 dev_info.port_hw_config[port].external_phy_config);
7418 bp->link_params.speed_cap_mask =
7420 dev_info.port_hw_config[port].speed_capability_mask);
7422 bp->port.link_config =
7423 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7425 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7426 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7427 " link_config 0x%08x\n",
7428 bp->link_params.serdes_config,
7429 bp->link_params.lane_config,
7430 bp->link_params.ext_phy_config,
7431 bp->link_params.speed_cap_mask, bp->port.link_config);
7433 bp->link_params.switch_cfg = (bp->port.link_config &
7434 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7435 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7437 bnx2x_link_settings_requested(bp);
7439 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7440 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7441 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7442 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7443 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7444 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7445 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7446 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7447 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7448 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
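/* Illustrative example: with mac_upper == 0x00000011 and
 * mac_lower == 0x22334455, the unpacking above produces the station
 * address 00:11:22:33:44:55.
 */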
7451 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7453 int func = BP_FUNC(bp);
7457 bnx2x_get_common_hwinfo(bp);
7461 if (CHIP_IS_E1H(bp)) {
7463 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7465 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7466 FUNC_MF_CFG_E1HOV_TAG_MASK);
7467 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7471 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7473 func, bp->e1hov, bp->e1hov);
7475 BNX2X_DEV_INFO("Single function mode\n");
7477 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7478 " aborting\n", func);
7484 if (!BP_NOMCP(bp)) {
7485 bnx2x_get_port_hwinfo(bp);
7487 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7488 DRV_MSG_SEQ_NUMBER_MASK);
7489 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7493 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7494 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7495 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7496 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7497 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7498 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7499 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7500 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7501 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7502 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7503 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7505 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7513 /* only supposed to happen on emulation/FPGA */
7514 BNX2X_ERR("warning random MAC workaround active\n");
7515 random_ether_addr(bp->dev->dev_addr);
7516 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7522 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7524 int func = BP_FUNC(bp);
7527 /* Disable interrupt handling until HW is initialized */
7528 atomic_set(&bp->intr_sem, 1);
7530 mutex_init(&bp->port.phy_mutex);
7532 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7533 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7535 rc = bnx2x_get_hwinfo(bp);
7537 /* need to reset chip if undi was active */
7539 bnx2x_undi_unload(bp);
7541 if (CHIP_REV_IS_FPGA(bp))
7542 printk(KERN_ERR PFX "FPGA detected\n");
7544 if (BP_NOMCP(bp) && (func == 0))
7546 "MCP disabled, must load devices in order!\n");
7550 bp->flags &= ~TPA_ENABLE_FLAG;
7551 bp->dev->features &= ~NETIF_F_LRO;
7553 bp->flags |= TPA_ENABLE_FLAG;
7554 bp->dev->features |= NETIF_F_LRO;
7558 bp->tx_ring_size = MAX_TX_AVAIL;
7559 bp->rx_ring_size = MAX_RX_AVAIL;
7567 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7568 bp->current_interval = (poll ? poll : bp->timer_interval);
7570 init_timer(&bp->timer);
7571 bp->timer.expires = jiffies + bp->current_interval;
7572 bp->timer.data = (unsigned long) bp;
7573 bp->timer.function = bnx2x_timer;
7579 * ethtool service functions
7582 /* All ethtool functions called with rtnl_lock */
7584 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7586 struct bnx2x *bp = netdev_priv(dev);
7588 cmd->supported = bp->port.supported;
7589 cmd->advertising = bp->port.advertising;
7591 if (netif_carrier_ok(dev)) {
7592 cmd->speed = bp->link_vars.line_speed;
7593 cmd->duplex = bp->link_vars.duplex;
7595 cmd->speed = bp->link_params.req_line_speed;
7596 cmd->duplex = bp->link_params.req_duplex;
7601 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7602 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7603 if (vn_max_rate < cmd->speed)
7604 cmd->speed = vn_max_rate;
7607 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7609 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7611 switch (ext_phy_type) {
7612 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7613 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7614 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7615 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7616 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7617 cmd->port = PORT_FIBRE;
7620 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7621 cmd->port = PORT_TP;
7624 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7625 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7626 bp->link_params.ext_phy_config);
7630 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7631 bp->link_params.ext_phy_config);
7635 cmd->port = PORT_TP;
7637 cmd->phy_address = bp->port.phy_addr;
7638 cmd->transceiver = XCVR_INTERNAL;
7640 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7641 cmd->autoneg = AUTONEG_ENABLE;
7643 cmd->autoneg = AUTONEG_DISABLE;
7648 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7649 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7650 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7651 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7652 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7653 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7654 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7659 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7661 struct bnx2x *bp = netdev_priv(dev);
7667 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7668 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7669 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7670 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7671 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7672 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7673 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7675 if (cmd->autoneg == AUTONEG_ENABLE) {
7676 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7677 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7681 /* advertise the requested speed and duplex if supported */
7682 cmd->advertising &= bp->port.supported;
7684 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7685 bp->link_params.req_duplex = DUPLEX_FULL;
7686 bp->port.advertising |= (ADVERTISED_Autoneg |
7689 } else { /* forced speed */
7690 /* advertise the requested speed and duplex if supported */
7691 switch (cmd->speed) {
7693 if (cmd->duplex == DUPLEX_FULL) {
7694 if (!(bp->port.supported &
7695 SUPPORTED_10baseT_Full)) {
7697 "10M full not supported\n");
7701 advertising = (ADVERTISED_10baseT_Full |
7704 if (!(bp->port.supported &
7705 SUPPORTED_10baseT_Half)) {
7707 "10M half not supported\n");
7711 advertising = (ADVERTISED_10baseT_Half |
7717 if (cmd->duplex == DUPLEX_FULL) {
7718 if (!(bp->port.supported &
7719 SUPPORTED_100baseT_Full)) {
7721 "100M full not supported\n");
7725 advertising = (ADVERTISED_100baseT_Full |
7728 if (!(bp->port.supported &
7729 SUPPORTED_100baseT_Half)) {
7731 "100M half not supported\n");
7735 advertising = (ADVERTISED_100baseT_Half |
7741 if (cmd->duplex != DUPLEX_FULL) {
7742 DP(NETIF_MSG_LINK, "1G half not supported\n");
7746 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7747 DP(NETIF_MSG_LINK, "1G full not supported\n");
7751 advertising = (ADVERTISED_1000baseT_Full |
7756 if (cmd->duplex != DUPLEX_FULL) {
7758 "2.5G half not supported\n");
7762 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7764 "2.5G full not supported\n");
7768 advertising = (ADVERTISED_2500baseX_Full |
7773 if (cmd->duplex != DUPLEX_FULL) {
7774 DP(NETIF_MSG_LINK, "10G half not supported\n");
7778 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7779 DP(NETIF_MSG_LINK, "10G full not supported\n");
7783 advertising = (ADVERTISED_10000baseT_Full |
7788 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7792 bp->link_params.req_line_speed = cmd->speed;
7793 bp->link_params.req_duplex = cmd->duplex;
7794 bp->port.advertising = advertising;
7797 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7798 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7799 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7800 bp->port.advertising);
7802 if (netif_running(dev)) {
7803 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7810 #define PHY_FW_VER_LEN 10
7812 static void bnx2x_get_drvinfo(struct net_device *dev,
7813 struct ethtool_drvinfo *info)
7815 struct bnx2x *bp = netdev_priv(dev);
7816 u8 phy_fw_ver[PHY_FW_VER_LEN];
7818 strcpy(info->driver, DRV_MODULE_NAME);
7819 strcpy(info->version, DRV_MODULE_VERSION);
7821 phy_fw_ver[0] = '\0';
7823 bnx2x_acquire_phy_lock(bp);
7824 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7825 (bp->state != BNX2X_STATE_CLOSED),
7826 phy_fw_ver, PHY_FW_VER_LEN);
7827 bnx2x_release_phy_lock(bp);
7830 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7831 (bp->common.bc_ver & 0xff0000) >> 16,
7832 (bp->common.bc_ver & 0xff00) >> 8,
7833 (bp->common.bc_ver & 0xff),
7834 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
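/* Example of the decode above: bc_ver 0x040200 is reported as "BC:4.2.0",
 * with " PHY:<version>" appended when the external PHY returned a
 * version string.
 */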
7835 strcpy(info->bus_info, pci_name(bp->pdev));
7836 info->n_stats = BNX2X_NUM_STATS;
7837 info->testinfo_len = BNX2X_NUM_TESTS;
7838 info->eedump_len = bp->common.flash_size;
7839 info->regdump_len = 0;
7842 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7844 struct bnx2x *bp = netdev_priv(dev);
7846 if (bp->flags & NO_WOL_FLAG) {
7850 wol->supported = WAKE_MAGIC;
7852 wol->wolopts = WAKE_MAGIC;
7856 memset(&wol->sopass, 0, sizeof(wol->sopass));
7859 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7861 struct bnx2x *bp = netdev_priv(dev);
7863 if (wol->wolopts & ~WAKE_MAGIC)
7866 if (wol->wolopts & WAKE_MAGIC) {
7867 if (bp->flags & NO_WOL_FLAG)
7877 static u32 bnx2x_get_msglevel(struct net_device *dev)
7879 struct bnx2x *bp = netdev_priv(dev);
7881 return bp->msglevel;
7884 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7886 struct bnx2x *bp = netdev_priv(dev);
7888 if (capable(CAP_NET_ADMIN))
7889 bp->msglevel = level;
7892 static int bnx2x_nway_reset(struct net_device *dev)
7894 struct bnx2x *bp = netdev_priv(dev);
7899 if (netif_running(dev)) {
7900 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7907 static int bnx2x_get_eeprom_len(struct net_device *dev)
7909 struct bnx2x *bp = netdev_priv(dev);
7911 return bp->common.flash_size;
7914 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7916 int port = BP_PORT(bp);
7920 /* adjust timeout for emulation/FPGA */
7921 count = NVRAM_TIMEOUT_COUNT;
7922 if (CHIP_REV_IS_SLOW(bp))
7925 /* request access to nvram interface */
7926 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7927 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7929 for (i = 0; i < count*10; i++) {
7930 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7931 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7937 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7938 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7945 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7947 int port = BP_PORT(bp);
7951 /* adjust timeout for emulation/FPGA */
7952 count = NVRAM_TIMEOUT_COUNT;
7953 if (CHIP_REV_IS_SLOW(bp))
7956 /* relinquish nvram interface */
7957 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7958 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7960 for (i = 0; i < count*10; i++) {
7961 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7962 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7968 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7969 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7976 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7980 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7982 /* enable both bits, even on read */
7983 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7984 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7985 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7988 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7992 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7994 /* disable both bits, even after read */
7995 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7996 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7997 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8000 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8006 /* build the command word */
8007 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8009 /* need to clear DONE bit separately */
8010 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8012 /* address of the NVRAM to read from */
8013 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8014 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8016 /* issue a read command */
8017 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8019 /* adjust timeout for emulation/FPGA */
8020 count = NVRAM_TIMEOUT_COUNT;
8021 if (CHIP_REV_IS_SLOW(bp))
8024 /* wait for completion */
8027 for (i = 0; i < count; i++) {
8029 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8031 if (val & MCPR_NVM_COMMAND_DONE) {
8032 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8033 /* we read NVRAM data in CPU order,
8034 * but ethtool sees it as an array of bytes;
8035 * converting to big-endian does the work */
8036 val = cpu_to_be32(val);
8046 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8053 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8055 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8060 if (offset + buf_size > bp->common.flash_size) {
8061 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8062 " buf_size (0x%x) > flash_size (0x%x)\n",
8063 offset, buf_size, bp->common.flash_size);
8067 /* request access to nvram interface */
8068 rc = bnx2x_acquire_nvram_lock(bp);
8072 /* enable access to nvram interface */
8073 bnx2x_enable_nvram_access(bp);
8075 /* read the first word(s) */
8076 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8077 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8078 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8079 memcpy(ret_buf, &val, 4);
8081 /* advance to the next dword */
8082 offset += sizeof(u32);
8083 ret_buf += sizeof(u32);
8084 buf_size -= sizeof(u32);
8089 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8090 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8091 memcpy(ret_buf, &val, 4);
8094 /* disable access to nvram interface */
8095 bnx2x_disable_nvram_access(bp);
8096 bnx2x_release_nvram_lock(bp);
8101 static int bnx2x_get_eeprom(struct net_device *dev,
8102 struct ethtool_eeprom *eeprom, u8 *eebuf)
8104 struct bnx2x *bp = netdev_priv(dev);
8107 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8108 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8109 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8110 eeprom->len, eeprom->len);
8112 /* parameters already validated in ethtool_get_eeprom */
8114 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8119 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8124 /* build the command word */
8125 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8127 /* need to clear DONE bit separately */
8128 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8130 /* write the data */
8131 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8133 /* address of the NVRAM to write to */
8134 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8135 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8137 /* issue the write command */
8138 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8140 /* adjust timeout for emulation/FPGA */
8141 count = NVRAM_TIMEOUT_COUNT;
8142 if (CHIP_REV_IS_SLOW(bp))
8145 /* wait for completion */
8147 for (i = 0; i < count; i++) {
8149 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8150 if (val & MCPR_NVM_COMMAND_DONE) {
8159 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
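/* Example: for offset 0x102 the macro yields 8 * (0x102 & 0x03) == 16,
 * i.e. the byte occupies bits 16-23 of the aligned dword at 0x100.
 */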
8161 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8169 if (offset + buf_size > bp->common.flash_size) {
8170 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8171 " buf_size (0x%x) > flash_size (0x%x)\n",
8172 offset, buf_size, bp->common.flash_size);
8176 /* request access to nvram interface */
8177 rc = bnx2x_acquire_nvram_lock(bp);
8181 /* enable access to nvram interface */
8182 bnx2x_enable_nvram_access(bp);
8184 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8185 align_offset = (offset & ~0x03);
8186 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8189 val &= ~(0xff << BYTE_OFFSET(offset));
8190 val |= (*data_buf << BYTE_OFFSET(offset));
8192 /* NVRAM data is returned as an array of bytes;
8193 * convert it back to CPU order */
8194 val = be32_to_cpu(val);
8196 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8200 /* disable access to nvram interface */
8201 bnx2x_disable_nvram_access(bp);
8202 bnx2x_release_nvram_lock(bp);
8207 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8215 if (buf_size == 1) /* ethtool */
8216 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8218 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8220 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8225 if (offset + buf_size > bp->common.flash_size) {
8226 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8227 " buf_size (0x%x) > flash_size (0x%x)\n",
8228 offset, buf_size, bp->common.flash_size);
8232 /* request access to nvram interface */
8233 rc = bnx2x_acquire_nvram_lock(bp);
8237 /* enable access to nvram interface */
8238 bnx2x_enable_nvram_access(bp);
8241 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8242 while ((written_so_far < buf_size) && (rc == 0)) {
8243 if (written_so_far == (buf_size - sizeof(u32)))
8244 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8245 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8246 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8247 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8248 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
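/* The flags above issue the write page by page: LAST is set on the final
 * dword of the buffer or of an NVRAM_PAGE_SIZE page, and FIRST is set
 * again on the first dword of the following page.
 */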
8250 memcpy(&val, data_buf, 4);
8252 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8254 /* advance to the next dword */
8255 offset += sizeof(u32);
8256 data_buf += sizeof(u32);
8257 written_so_far += sizeof(u32);
8261 /* disable access to nvram interface */
8262 bnx2x_disable_nvram_access(bp);
8263 bnx2x_release_nvram_lock(bp);
8268 static int bnx2x_set_eeprom(struct net_device *dev,
8269 struct ethtool_eeprom *eeprom, u8 *eebuf)
8271 struct bnx2x *bp = netdev_priv(dev);
8274 if (!netif_running(dev))
8277 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8278 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8279 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8280 eeprom->len, eeprom->len);
8282 /* parameters already validated in ethtool_set_eeprom */
8284 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8285 if (eeprom->magic == 0x00504859)
8288 bnx2x_acquire_phy_lock(bp);
8289 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8290 bp->link_params.ext_phy_config,
8291 (bp->state != BNX2X_STATE_CLOSED),
8292 eebuf, eeprom->len);
8293 if ((bp->state == BNX2X_STATE_OPEN) ||
8294 (bp->state == BNX2X_STATE_DISABLED)) {
8295 rc |= bnx2x_link_reset(&bp->link_params,
8297 rc |= bnx2x_phy_init(&bp->link_params,
8300 bnx2x_release_phy_lock(bp);
8302 } else /* Only the PMF can access the PHY */
8305 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8310 static int bnx2x_get_coalesce(struct net_device *dev,
8311 struct ethtool_coalesce *coal)
8313 struct bnx2x *bp = netdev_priv(dev);
8315 memset(coal, 0, sizeof(struct ethtool_coalesce));
8317 coal->rx_coalesce_usecs = bp->rx_ticks;
8318 coal->tx_coalesce_usecs = bp->tx_ticks;
8323 static int bnx2x_set_coalesce(struct net_device *dev,
8324 struct ethtool_coalesce *coal)
8326 struct bnx2x *bp = netdev_priv(dev);
8328 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8329 if (bp->rx_ticks > 3000)
8330 bp->rx_ticks = 3000;
8332 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8333 if (bp->tx_ticks > 0x3000)
8334 bp->tx_ticks = 0x3000;
8336 if (netif_running(dev))
8337 bnx2x_update_coalesce(bp);
8342 static void bnx2x_get_ringparam(struct net_device *dev,
8343 struct ethtool_ringparam *ering)
8345 struct bnx2x *bp = netdev_priv(dev);
8347 ering->rx_max_pending = MAX_RX_AVAIL;
8348 ering->rx_mini_max_pending = 0;
8349 ering->rx_jumbo_max_pending = 0;
8351 ering->rx_pending = bp->rx_ring_size;
8352 ering->rx_mini_pending = 0;
8353 ering->rx_jumbo_pending = 0;
8355 ering->tx_max_pending = MAX_TX_AVAIL;
8356 ering->tx_pending = bp->tx_ring_size;
8359 static int bnx2x_set_ringparam(struct net_device *dev,
8360 struct ethtool_ringparam *ering)
8362 struct bnx2x *bp = netdev_priv(dev);
8365 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8366 (ering->tx_pending > MAX_TX_AVAIL) ||
8367 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8370 bp->rx_ring_size = ering->rx_pending;
8371 bp->tx_ring_size = ering->tx_pending;
8373 if (netif_running(dev)) {
8374 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8375 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8381 static void bnx2x_get_pauseparam(struct net_device *dev,
8382 struct ethtool_pauseparam *epause)
8384 struct bnx2x *bp = netdev_priv(dev);
8386 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8387 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8389 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8390 BNX2X_FLOW_CTRL_RX);
8391 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8392 BNX2X_FLOW_CTRL_TX);
8394 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8395 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8396 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8399 static int bnx2x_set_pauseparam(struct net_device *dev,
8400 struct ethtool_pauseparam *epause)
8402 struct bnx2x *bp = netdev_priv(dev);
8407 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8408 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8409 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8411 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8413 if (epause->rx_pause)
8414 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8416 if (epause->tx_pause)
8417 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8419 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8420 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8422 if (epause->autoneg) {
8423 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8424 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8428 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8429 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8433 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8435 if (netif_running(dev)) {
8436 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8443 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8445 struct bnx2x *bp = netdev_priv(dev);
8449 /* TPA requires Rx CSUM offloading */
8450 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8451 if (!(dev->features & NETIF_F_LRO)) {
8452 dev->features |= NETIF_F_LRO;
8453 bp->flags |= TPA_ENABLE_FLAG;
8457 } else if (dev->features & NETIF_F_LRO) {
8458 dev->features &= ~NETIF_F_LRO;
8459 bp->flags &= ~TPA_ENABLE_FLAG;
8463 if (changed && netif_running(dev)) {
8464 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8465 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8471 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8473 struct bnx2x *bp = netdev_priv(dev);
8478 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8480 struct bnx2x *bp = netdev_priv(dev);
8485 /* Disable TPA when Rx CSUM is disabled; otherwise all
8486 TPA'ed packets will be discarded due to a wrong TCP CSUM */
8488 u32 flags = ethtool_op_get_flags(dev);
8490 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8496 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8499 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8500 dev->features |= NETIF_F_TSO6;
8502 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8503 dev->features &= ~NETIF_F_TSO6;
8509 static const struct {
8510 char string[ETH_GSTRING_LEN];
8511 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8512 { "register_test (offline)" },
8513 { "memory_test (offline)" },
8514 { "loopback_test (offline)" },
8515 { "nvram_test (online)" },
8516 { "interrupt_test (online)" },
8517 { "link_test (online)" },
8518 { "idle check (online)" },
8519 { "MC errors (online)" }
8522 static int bnx2x_self_test_count(struct net_device *dev)
8524 return BNX2X_NUM_TESTS;
8527 static int bnx2x_test_registers(struct bnx2x *bp)
8529 int idx, i, rc = -ENODEV;
8531 int port = BP_PORT(bp);
8532 static const struct {
8537 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8538 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8539 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8540 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8541 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8542 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8543 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8544 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8545 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8546 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8547 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8548 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8549 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8550 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8551 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8552 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8553 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8554 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8555 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8556 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8557 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8558 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8559 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8560 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8561 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8562 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8563 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8564 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8565 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8566 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8567 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8568 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8569 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8570 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8571 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8572 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8573 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8574 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8576 { 0xffffffff, 0, 0x00000000 }
8579 if (!netif_running(bp->dev))
8582 /* Repeat the test twice:
8583 First by writing 0x00000000, second by writing 0xffffffff */
8584 for (idx = 0; idx < 2; idx++) {
8591 wr_val = 0xffffffff;
8595 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8596 u32 offset, mask, save_val, val;
8598 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8599 mask = reg_tbl[i].mask;
8601 save_val = REG_RD(bp, offset);
8603 REG_WR(bp, offset, wr_val);
8604 val = REG_RD(bp, offset);
8606 /* Restore the original register's value */
8607 REG_WR(bp, offset, save_val);
8609 /* verify that the value is as expected */
8610 if ((val & mask) != (wr_val & mask))
8621 static int bnx2x_test_memory(struct bnx2x *bp)
8623 int i, j, rc = -ENODEV;
8625 static const struct {
8629 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8630 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8631 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8632 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8633 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8634 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8635 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8639 static const struct {
8645 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8646 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8647 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8648 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8649 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8650 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8652 { NULL, 0xffffffff, 0, 0 }
8655 if (!netif_running(bp->dev))
8658 /* Go through all the memories */
8659 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8660 for (j = 0; j < mem_tbl[i].size; j++)
8661 REG_RD(bp, mem_tbl[i].offset + j*4);
8663 /* Check the parity status */
8664 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8665 val = REG_RD(bp, prty_tbl[i].offset);
8666 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8667 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8669 "%s is 0x%x\n", prty_tbl[i].name, val);
8680 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8685 while (bnx2x_link_test(bp) && cnt--)
8689 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8691 unsigned int pkt_size, num_pkts, i;
8692 struct sk_buff *skb;
8693 unsigned char *packet;
8694 struct bnx2x_fastpath *fp = &bp->fp[0];
8695 u16 tx_start_idx, tx_idx;
8696 u16 rx_start_idx, rx_idx;
8698 struct sw_tx_bd *tx_buf;
8699 struct eth_tx_bd *tx_bd;
8701 union eth_rx_cqe *cqe;
8703 struct sw_rx_bd *rx_buf;
8707 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8708 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8709 bnx2x_acquire_phy_lock(bp);
8710 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8711 bnx2x_release_phy_lock(bp);
8713 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8714 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8715 bnx2x_acquire_phy_lock(bp);
8716 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8717 bnx2x_release_phy_lock(bp);
8718 /* wait until link state is restored */
8719 bnx2x_wait_for_link(bp, link_up);
8725 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8728 goto test_loopback_exit;
8730 packet = skb_put(skb, pkt_size);
8731 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8732 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8733 for (i = ETH_HLEN; i < pkt_size; i++)
8734 packet[i] = (unsigned char) (i & 0xff);
8737 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8738 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8740 pkt_prod = fp->tx_pkt_prod++;
8741 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8742 tx_buf->first_bd = fp->tx_bd_prod;
8745 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8746 mapping = pci_map_single(bp->pdev, skb->data,
8747 skb_headlen(skb), PCI_DMA_TODEVICE);
8748 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8749 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8750 tx_bd->nbd = cpu_to_le16(1);
8751 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8752 tx_bd->vlan = cpu_to_le16(pkt_prod);
8753 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8754 ETH_TX_BD_FLAGS_END_BD);
8755 tx_bd->general_data = ((UNICAST_ADDRESS <<
8756 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8760 fp->hw_tx_prods->bds_prod =
8761 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8762 mb(); /* FW restriction: must not reorder writing nbd and packets */
8763 fp->hw_tx_prods->packets_prod =
8764 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8765 DOORBELL(bp, FP_IDX(fp), 0);
8771 bp->dev->trans_start = jiffies;
8775 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8776 if (tx_idx != tx_start_idx + num_pkts)
8777 goto test_loopback_exit;
8779 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8780 if (rx_idx != rx_start_idx + num_pkts)
8781 goto test_loopback_exit;
8783 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8784 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8785 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8786 goto test_loopback_rx_exit;
8788 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8789 if (len != pkt_size)
8790 goto test_loopback_rx_exit;
8792 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8794 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8795 for (i = ETH_HLEN; i < pkt_size; i++)
8796 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8797 goto test_loopback_rx_exit;
8801 test_loopback_rx_exit:
8803 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8804 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8805 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8806 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8808 /* Update producers */
8809 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8813 bp->link_params.loopback_mode = LOOPBACK_NONE;
8818 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8822 if (!netif_running(bp->dev))
8823 return BNX2X_LOOPBACK_FAILED;
8825 bnx2x_netif_stop(bp, 1);
8827 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8828 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8829 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8832 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8833 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8834 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8837 bnx2x_netif_start(bp);
8842 #define CRC32_RESIDUAL 0xdebb20e3
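/* Each nvram region checked below ends with its little-endian CRC32,
 * so running ether_crc_le() over data plus stored CRC folds to the
 * fixed residual above, and a region can be validated without
 * separating payload from checksum. A minimal sketch of that check;
 * the helper name is illustrative, not part of the driver:
 */
static inline int bnx2x_crc_region_ok(const u8 *data, int size_with_crc)
{
	/* size_with_crc includes the trailing 4 CRC bytes */
	return ether_crc_le(size_with_crc, data) == CRC32_RESIDUAL;
}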
8844 static int bnx2x_test_nvram(struct bnx2x *bp)
8846 static const struct {
8850 { 0, 0x14 }, /* bootstrap */
8851 { 0x14, 0xec }, /* dir */
8852 { 0x100, 0x350 }, /* manuf_info */
8853 { 0x450, 0xf0 }, /* feature_info */
8854 { 0x640, 0x64 }, /* upgrade_key_info */
8856 { 0x708, 0x70 }, /* manuf_key_info */
8861 u8 *data = (u8 *)buf;
8865 rc = bnx2x_nvram_read(bp, 0, data, 4);
8867 DP(NETIF_MSG_PROBE, "magic value read failed (rc -%d)\n", -rc);
8868 goto test_nvram_exit;
8871 magic = be32_to_cpu(buf[0]);
8872 if (magic != 0x669955aa) {
8873 DP(NETIF_MSG_PROBE, "wrong magic value (0x%08x)\n", magic);
8875 goto test_nvram_exit;
8878 for (i = 0; nvram_tbl[i].size; i++) {
8880 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8884 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8885 goto test_nvram_exit;
8888 csum = ether_crc_le(nvram_tbl[i].size, data);
8889 if (csum != CRC32_RESIDUAL) {
8891 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8893 goto test_nvram_exit;
8901 static int bnx2x_test_intr(struct bnx2x *bp)
8903 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8906 if (!netif_running(bp->dev))
8909 config->hdr.length_6b = 0;
8910 config->hdr.offset = 0;
8911 config->hdr.client_id = BP_CL_ID(bp);
8912 config->hdr.reserved1 = 0;
8914 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8915 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8916 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8918 bp->set_mac_pending++;
8919 for (i = 0; i < 10; i++) {
8920 if (!bp->set_mac_pending)
8922 msleep_interruptible(10);
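/* Completion pattern (restating the loop above): the SET_MAC ramrod
 * completes through the slowpath interrupt path, which clears
 * set_mac_pending; polling the flag for up to ~100ms therefore
 * exercises the whole IGU -> ISR -> slowpath event chain, which is
 * exactly what this interrupt test is meant to verify. */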
8931 static void bnx2x_self_test(struct net_device *dev,
8932 struct ethtool_test *etest, u64 *buf)
8934 struct bnx2x *bp = netdev_priv(dev);
8936 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8938 if (!netif_running(dev))
8941 /* offline tests are not supported in MF mode */
8943 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8945 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8948 link_up = bp->link_vars.link_up;
8949 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8950 bnx2x_nic_load(bp, LOAD_DIAG);
8951 /* wait until link state is restored */
8952 bnx2x_wait_for_link(bp, link_up);
8954 if (bnx2x_test_registers(bp) != 0) {
8956 etest->flags |= ETH_TEST_FL_FAILED;
8958 if (bnx2x_test_memory(bp) != 0) {
8960 etest->flags |= ETH_TEST_FL_FAILED;
8962 buf[2] = bnx2x_test_loopback(bp, link_up);
8964 etest->flags |= ETH_TEST_FL_FAILED;
8966 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8967 bnx2x_nic_load(bp, LOAD_NORMAL);
8968 /* wait until link state is restored */
8969 bnx2x_wait_for_link(bp, link_up);
8971 if (bnx2x_test_nvram(bp) != 0) {
8973 etest->flags |= ETH_TEST_FL_FAILED;
8975 if (bnx2x_test_intr(bp) != 0) {
8977 etest->flags |= ETH_TEST_FL_FAILED;
8980 if (bnx2x_link_test(bp) != 0) {
8982 etest->flags |= ETH_TEST_FL_FAILED;
8984 buf[7] = bnx2x_mc_assert(bp);
8986 etest->flags |= ETH_TEST_FL_FAILED;
8988 #ifdef BNX2X_EXTRA_DEBUG
8989 bnx2x_panic_dump(bp);
8993 static const struct {
8997 #define STATS_FLAGS_PORT 1
8998 #define STATS_FLAGS_FUNC 2
8999 u8 string[ETH_GSTRING_LEN];
9000 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9001 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9002 8, STATS_FLAGS_FUNC, "rx_bytes" },
9003 { STATS_OFFSET32(error_bytes_received_hi),
9004 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9005 { STATS_OFFSET32(total_bytes_transmitted_hi),
9006 8, STATS_FLAGS_FUNC, "tx_bytes" },
9007 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9008 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9009 { STATS_OFFSET32(total_unicast_packets_received_hi),
9010 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9011 { STATS_OFFSET32(total_multicast_packets_received_hi),
9012 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9013 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9014 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9015 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9016 8, STATS_FLAGS_FUNC, "tx_packets" },
9017 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9018 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9019 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9020 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9021 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9022 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9023 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9024 8, STATS_FLAGS_PORT, "rx_align_errors" },
9025 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9026 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9027 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9028 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9029 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9030 8, STATS_FLAGS_PORT, "tx_deferred" },
9031 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9032 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9033 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9034 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9035 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9036 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9037 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9038 8, STATS_FLAGS_PORT, "rx_fragments" },
9039 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9040 8, STATS_FLAGS_PORT, "rx_jabbers" },
9041 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9042 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9043 { STATS_OFFSET32(jabber_packets_received),
9044 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9045 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9046 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9047 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9048 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9049 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9050 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9051 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9052 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9053 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9054 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9055 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9056 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9057 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9058 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9059 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9060 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9061 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9062 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9063 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9064 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9065 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9066 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9067 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9068 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9069 { STATS_OFFSET32(mac_filter_discard),
9070 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9071 { STATS_OFFSET32(no_buff_discard),
9072 4, STATS_FLAGS_FUNC, "rx_discards" },
9073 { STATS_OFFSET32(xxoverflow_discard),
9074 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9075 { STATS_OFFSET32(brb_drop_hi),
9076 8, STATS_FLAGS_PORT, "brb_discard" },
9077 { STATS_OFFSET32(brb_truncate_hi),
9078 8, STATS_FLAGS_PORT, "brb_truncate" },
9079 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9080 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9081 { STATS_OFFSET32(rx_skb_alloc_failed),
9082 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9083 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9084 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9087 #define IS_NOT_E1HMF_STAT(bp, i) \
9088 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
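/* Usage sketch (mirroring the loops below): in E1H multi-function mode
 * the per-port MAC counters cannot be attributed to a single function,
 * so both the name table and the value array must skip the same
 * entries or names and values would misalign; emit() is a hypothetical
 * stand-in for the real copy:
 *
 *	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
 *		if (IS_NOT_E1HMF_STAT(bp, i))
 *			continue;
 *		emit(j++, bnx2x_stats_arr[i]);
 *	}
 */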
9090 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9092 struct bnx2x *bp = netdev_priv(dev);
9095 switch (stringset) {
9097 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9098 if (IS_NOT_E1HMF_STAT(bp, i))
9100 strcpy(buf + j*ETH_GSTRING_LEN,
9101 bnx2x_stats_arr[i].string);
9107 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9112 static int bnx2x_get_stats_count(struct net_device *dev)
9114 struct bnx2x *bp = netdev_priv(dev);
9115 int i, num_stats = 0;
9117 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9118 if (IS_NOT_E1HMF_STAT(bp, i))
9125 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9126 struct ethtool_stats *stats, u64 *buf)
9128 struct bnx2x *bp = netdev_priv(dev);
9129 u32 *hw_stats = (u32 *)&bp->eth_stats;
9132 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9133 if (IS_NOT_E1HMF_STAT(bp, i))
9136 if (bnx2x_stats_arr[i].size == 0) {
9137 /* skip this counter */
9142 if (bnx2x_stats_arr[i].size == 4) {
9143 /* 4-byte counter */
9144 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9148 /* 8-byte counter */
9149 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9150 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9155 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9157 struct bnx2x *bp = netdev_priv(dev);
9158 int port = BP_PORT(bp);
9161 if (!netif_running(dev))
9170 for (i = 0; i < (data * 2); i++) {
9172 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9173 bp->link_params.hw_led_mode,
9174 bp->link_params.chip_id);
9176 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9177 bp->link_params.hw_led_mode,
9178 bp->link_params.chip_id);
9180 msleep_interruptible(500);
9181 if (signal_pending(current))
9185 if (bp->link_vars.link_up)
9186 bnx2x_set_led(bp, port, LED_MODE_OPER,
9187 bp->link_vars.line_speed,
9188 bp->link_params.hw_led_mode,
9189 bp->link_params.chip_id);
9194 static struct ethtool_ops bnx2x_ethtool_ops = {
9195 .get_settings = bnx2x_get_settings,
9196 .set_settings = bnx2x_set_settings,
9197 .get_drvinfo = bnx2x_get_drvinfo,
9198 .get_wol = bnx2x_get_wol,
9199 .set_wol = bnx2x_set_wol,
9200 .get_msglevel = bnx2x_get_msglevel,
9201 .set_msglevel = bnx2x_set_msglevel,
9202 .nway_reset = bnx2x_nway_reset,
9203 .get_link = ethtool_op_get_link,
9204 .get_eeprom_len = bnx2x_get_eeprom_len,
9205 .get_eeprom = bnx2x_get_eeprom,
9206 .set_eeprom = bnx2x_set_eeprom,
9207 .get_coalesce = bnx2x_get_coalesce,
9208 .set_coalesce = bnx2x_set_coalesce,
9209 .get_ringparam = bnx2x_get_ringparam,
9210 .set_ringparam = bnx2x_set_ringparam,
9211 .get_pauseparam = bnx2x_get_pauseparam,
9212 .set_pauseparam = bnx2x_set_pauseparam,
9213 .get_rx_csum = bnx2x_get_rx_csum,
9214 .set_rx_csum = bnx2x_set_rx_csum,
9215 .get_tx_csum = ethtool_op_get_tx_csum,
9216 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9217 .set_flags = bnx2x_set_flags,
9218 .get_flags = ethtool_op_get_flags,
9219 .get_sg = ethtool_op_get_sg,
9220 .set_sg = ethtool_op_set_sg,
9221 .get_tso = ethtool_op_get_tso,
9222 .set_tso = bnx2x_set_tso,
9223 .self_test_count = bnx2x_self_test_count,
9224 .self_test = bnx2x_self_test,
9225 .get_strings = bnx2x_get_strings,
9226 .phys_id = bnx2x_phys_id,
9227 .get_stats_count = bnx2x_get_stats_count,
9228 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9231 /* end of ethtool_ops */
9233 /****************************************************************************
9234 * General service functions
9235 ****************************************************************************/
9237 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9241 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9245 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9246 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9247 PCI_PM_CTRL_PME_STATUS));
9249 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9250 /* delay required during transition out of D3hot */
9255 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9259 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9261 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9264 /* No more memory access after this point until
9265 * device is brought back to D0.
9276 * net_device service functions
9279 static int bnx2x_poll(struct napi_struct *napi, int budget)
9281 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9283 struct bnx2x *bp = fp->bp;
9287 #ifdef BNX2X_STOP_ON_ERROR
9288 if (unlikely(bp->panic))
9292 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9293 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9294 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9296 bnx2x_update_fpsb_idx(fp);
9298 if (BNX2X_HAS_TX_WORK(fp))
9299 bnx2x_tx_int(fp, budget);
9301 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9302 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9304 if (BNX2X_HAS_RX_WORK(fp))
9305 work_done = bnx2x_rx_int(fp, budget);
9307 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9308 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9309 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9312 /* must not complete if we consumed full budget */
9313 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9315 #ifdef BNX2X_STOP_ON_ERROR
9318 netif_rx_complete(napi);
9320 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9321 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9322 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9323 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
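/* Ack-ordering note (a reading of the two calls above, not documented
 * FW behaviour): the USTORM index is ack'ed with IGU_INT_NOP, which
 * only records the new consumer, and the final CSTORM ack uses
 * IGU_INT_ENABLE to re-arm the line, so no interrupt can fire between
 * the two index updates. */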
9329 /* we split the first BD into headers and data BDs
9330 * to ease the pain of our fellow microcode engineers;
9331 * we use one mapping for both BDs.
9332 * So far this has only been observed to happen
9333 * in Other Operating Systems(TM)
9335 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9336 struct bnx2x_fastpath *fp,
9337 struct eth_tx_bd **tx_bd, u16 hlen,
9338 u16 bd_prod, int nbd)
9340 struct eth_tx_bd *h_tx_bd = *tx_bd;
9341 struct eth_tx_bd *d_tx_bd;
9343 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9345 /* first fix first BD */
9346 h_tx_bd->nbd = cpu_to_le16(nbd);
9347 h_tx_bd->nbytes = cpu_to_le16(hlen);
9349 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9350 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9351 h_tx_bd->addr_lo, h_tx_bd->nbd);
9353 /* now get a new data BD
9354 * (after the pbd) and fill it */
9355 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9356 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9358 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9359 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9361 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9362 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9363 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9365 /* this marks the BD as one that has no individual mapping;
9366 * the FW ignores this flag in a BD not marked start
9368 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9369 DP(NETIF_MSG_TX_QUEUED,
9370 "TSO split data size is %d (%x:%x)\n",
9371 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9373 /* update tx_bd so the last-BD flag lands on the right BD */
9379 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9382 csum = (u16) ~csum_fold(csum_sub(csum,
9383 csum_partial(t_header - fix, fix, 0)));
9386 csum = (u16) ~csum_fold(csum_add(csum,
9387 csum_partial(t_header, -fix, 0)));
9389 return swab16(csum);
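/* Re-basing sketch (a restatement of the arithmetic above): when the
 * stack computed the checksum starting 'fix' bytes before the
 * transport header (fix > 0), csum_partial() over those bytes is
 * subtracted back out; when it started past it (fix < 0), the missing
 * bytes are added in. csum_fold() collapses the 32-bit sum and
 * swab16() produces the byte order the parsing BD expects. */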
9392 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9396 if (skb->ip_summed != CHECKSUM_PARTIAL)
9400 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9402 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9403 rc |= XMIT_CSUM_TCP;
9407 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9408 rc |= XMIT_CSUM_TCP;
9412 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9415 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
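/* Example outcome (XMIT_* names as used above and in the elided
 * branches): an IPv4 TCP skb with CHECKSUM_PARTIAL and gso_type
 * SKB_GSO_TCPV4 collects the v4 checksum bits, XMIT_CSUM_TCP and the
 * GSO bit, steering it through both the checksum and TSO paths of
 * bnx2x_start_xmit() below; anything else takes the plain path. */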
9421 /* check if packet requires linearization (packet is too fragmented) */
9422 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9427 int first_bd_sz = 0;
9429 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9430 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9432 if (xmit_type & XMIT_GSO) {
9433 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9434 /* Check if LSO packet needs to be copied:
9435 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9436 int wnd_size = MAX_FETCH_BD - 3;
9437 /* Number of windows to check */
9438 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9443 /* Headers length */
9444 hlen = (int)(skb_transport_header(skb) - skb->data) +
9447 /* Amount of data (w/o headers) in the linear part of the SKB */
9448 first_bd_sz = skb_headlen(skb) - hlen;
9450 wnd_sum = first_bd_sz;
9452 /* Calculate the first sum - it's special */
9453 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9455 skb_shinfo(skb)->frags[frag_idx].size;
9457 /* If there was data in the linear part of the skb - check it */
9458 if (first_bd_sz > 0) {
9459 if (unlikely(wnd_sum < lso_mss)) {
9464 wnd_sum -= first_bd_sz;
9467 /* Others are easier: run through the frag list and
9468 check all windows */
9469 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9471 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9473 if (unlikely(wnd_sum < lso_mss)) {
9478 skb_shinfo(skb)->frags[wnd_idx].size;
9482 /* in the non-LSO case, a too fragmented packet should always be linearized */
9489 if (unlikely(to_copy))
9490 DP(NETIF_MSG_TX_QUEUED,
9491 "Linearization IS REQUIRED for %s packet. "
9492 "num_frags %d hlen %d first_bd_sz %d\n",
9493 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9494 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
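/* Worked example, assuming MAX_FETCH_BD is 13 (so wnd_size == 10): for
 * an LSO skb with 12 frags, every window of 10 consecutive BDs - the
 * linear part plus frags 0..8, then frags 0..9, 1..10 and 2..11 - must
 * carry at least lso_mss bytes; if any window falls short, a single
 * MSS could span more BDs than the FW can fetch at once, so the skb
 * has to be linearized first. */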
9499 /* called with netif_tx_lock
9500 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9501 * netif_wake_queue()
9503 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9505 struct bnx2x *bp = netdev_priv(dev);
9506 struct bnx2x_fastpath *fp;
9507 struct sw_tx_bd *tx_buf;
9508 struct eth_tx_bd *tx_bd;
9509 struct eth_tx_parse_bd *pbd = NULL;
9510 u16 pkt_prod, bd_prod;
9513 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9514 int vlan_off = (bp->e1hov ? 4 : 0);
9518 #ifdef BNX2X_STOP_ON_ERROR
9519 if (unlikely(bp->panic))
9520 return NETDEV_TX_BUSY;
9523 fp_index = (smp_processor_id() % bp->num_queues);
9524 fp = &bp->fp[fp_index];
9526 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9527 bp->eth_stats.driver_xoff++;
9528 netif_stop_queue(dev);
9529 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9530 return NETDEV_TX_BUSY;
9533 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9534 " gso type %x xmit_type %x\n",
9535 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9536 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9538 /* First, check if we need to linearize the skb
9539 (due to FW restrictions) */
9540 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9541 /* Update linearization statistics */
9543 if (skb_linearize(skb) != 0) {
9544 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9545 "silently dropping this SKB\n");
9546 dev_kfree_skb_any(skb);
9547 return NETDEV_TX_OK;
9552 Please read carefully. First we use one BD which we mark as start,
9553 then for TSO or xsum we have a parsing info BD,
9554 and only then we have the rest of the TSO BDs.
9555 (don't forget to mark the last one as last,
9556 and to unmap only AFTER you write to the BD ...)
9557 And above all, all pbd sizes are in words - NOT DWORDS!
9560 pkt_prod = fp->tx_pkt_prod++;
9561 bd_prod = TX_BD(fp->tx_bd_prod);
9563 /* get a tx_buf and first BD */
9564 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9565 tx_bd = &fp->tx_desc_ring[bd_prod];
9567 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9568 tx_bd->general_data = (UNICAST_ADDRESS <<
9569 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9571 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9573 /* remember the first BD of the packet */
9574 tx_buf->first_bd = fp->tx_bd_prod;
9577 DP(NETIF_MSG_TX_QUEUED,
9578 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9579 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9582 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9583 (bp->flags & HW_VLAN_TX_FLAG)) {
9584 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9585 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9589 tx_bd->vlan = cpu_to_le16(pkt_prod);
9592 /* turn on parsing and get a BD */
9593 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9594 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9596 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9599 if (xmit_type & XMIT_CSUM) {
9600 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9602 /* for now NS flag is not used in Linux */
9603 pbd->global_data = (hlen |
9604 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9605 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9607 pbd->ip_hlen = (skb_transport_header(skb) -
9608 skb_network_header(skb)) / 2;
9610 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9612 pbd->total_hlen = cpu_to_le16(hlen);
9613 hlen = hlen*2 - vlan_off;
9615 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9617 if (xmit_type & XMIT_CSUM_V4)
9618 tx_bd->bd_flags.as_bitfield |=
9619 ETH_TX_BD_FLAGS_IP_CSUM;
9621 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9623 if (xmit_type & XMIT_CSUM_TCP) {
9624 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9627 s8 fix = SKB_CS_OFF(skb); /* signed! */
9629 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9630 pbd->cs_offset = fix / 2;
9632 DP(NETIF_MSG_TX_QUEUED,
9633 "hlen %d offset %d fix %d csum before fix %x\n",
9634 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9637 /* HW bug: fixup the CSUM */
9638 pbd->tcp_pseudo_csum =
9639 bnx2x_csum_fix(skb_transport_header(skb),
9642 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9643 pbd->tcp_pseudo_csum);
9647 mapping = pci_map_single(bp->pdev, skb->data,
9648 skb_headlen(skb), PCI_DMA_TODEVICE);
9650 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9651 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9652 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9653 tx_bd->nbd = cpu_to_le16(nbd);
9654 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9656 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9657 " nbytes %d flags %x vlan %x\n",
9658 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9659 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9660 le16_to_cpu(tx_bd->vlan));
9662 if (xmit_type & XMIT_GSO) {
9664 DP(NETIF_MSG_TX_QUEUED,
9665 "TSO packet len %d hlen %d total len %d tso size %d\n",
9666 skb->len, hlen, skb_headlen(skb),
9667 skb_shinfo(skb)->gso_size);
9669 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9671 if (unlikely(skb_headlen(skb) > hlen))
9672 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9675 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9676 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9677 pbd->tcp_flags = pbd_tcp_flags(skb);
9679 if (xmit_type & XMIT_GSO_V4) {
9680 pbd->ip_id = swab16(ip_hdr(skb)->id);
9681 pbd->tcp_pseudo_csum =
9682 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9684 0, IPPROTO_TCP, 0));
9687 pbd->tcp_pseudo_csum =
9688 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9689 &ipv6_hdr(skb)->daddr,
9690 0, IPPROTO_TCP, 0));
9692 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9695 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9696 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9698 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9699 tx_bd = &fp->tx_desc_ring[bd_prod];
9701 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9702 frag->size, PCI_DMA_TODEVICE);
9704 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9705 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9706 tx_bd->nbytes = cpu_to_le16(frag->size);
9707 tx_bd->vlan = cpu_to_le16(pkt_prod);
9708 tx_bd->bd_flags.as_bitfield = 0;
9710 DP(NETIF_MSG_TX_QUEUED,
9711 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9712 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9713 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9716 /* now at last mark the BD as the last BD */
9717 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9719 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9720 tx_bd, tx_bd->bd_flags.as_bitfield);
9722 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9724 /* now send a tx doorbell, counting the next BD
9725 * if the packet contains or ends with it
9727 if (TX_BD_POFF(bd_prod) < nbd)
9731 DP(NETIF_MSG_TX_QUEUED,
9732 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9733 " tcp_flags %x xsum %x seq %u hlen %u\n",
9734 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9735 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9736 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9738 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9741 * Make sure that the BD data is updated before updating the producer
9742 * since FW might read the BD right after the producer is updated.
9743 * This is only applicable for weak-ordered memory model archs such
9744 * as IA-64. The following barrier is also mandatory since the FW
9745 * assumes packets must have BDs.
9749 fp->hw_tx_prods->bds_prod =
9750 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9751 mb(); /* FW restriction: must not reorder writing nbd and packets */
9752 fp->hw_tx_prods->packets_prod =
9753 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9754 DOORBELL(bp, FP_IDX(fp), 0);
9758 fp->tx_bd_prod += nbd;
9759 dev->trans_start = jiffies;
9761 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9762 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9763 if we put Tx into XOFF state. */
9765 netif_stop_queue(dev);
9766 bp->eth_stats.driver_xoff++;
9767 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9768 netif_wake_queue(dev);
9772 return NETDEV_TX_OK;
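/* BD-chain recap for one TSO packet built above (a summary of the code
 * path, not new driver logic):
 *
 *	start BD  - headers, ETH_TX_BD_FLAGS_START_BD
 *	parse BD  - hlen, pseudo csum, lso_mss (sizes in words!)
 *	data BD   - split off by bnx2x_tx_split() when headlen > hlen
 *	frag BDs  - one per page fragment
 *	last BD   - gets ETH_TX_BD_FLAGS_END_BD
 *
 * nbd counts nr_frags plus one (no parse BD) or two (with it), and the
 * producers are published only after wmb() so the FW never observes a
 * producer that is ahead of its BDs. */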
9775 /* called with rtnl_lock */
9776 static int bnx2x_open(struct net_device *dev)
9778 struct bnx2x *bp = netdev_priv(dev);
9780 bnx2x_set_power_state(bp, PCI_D0);
9782 return bnx2x_nic_load(bp, LOAD_OPEN);
9785 /* called with rtnl_lock */
9786 static int bnx2x_close(struct net_device *dev)
9788 struct bnx2x *bp = netdev_priv(dev);
9790 /* Unload the driver, release IRQs */
9791 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9792 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9793 if (!CHIP_REV_IS_SLOW(bp))
9794 bnx2x_set_power_state(bp, PCI_D3hot);
9799 /* called with netif_tx_lock from set_multicast */
9800 static void bnx2x_set_rx_mode(struct net_device *dev)
9802 struct bnx2x *bp = netdev_priv(dev);
9803 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9804 int port = BP_PORT(bp);
9806 if (bp->state != BNX2X_STATE_OPEN) {
9807 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9811 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9813 if (dev->flags & IFF_PROMISC)
9814 rx_mode = BNX2X_RX_MODE_PROMISC;
9816 else if ((dev->flags & IFF_ALLMULTI) ||
9817 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9818 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9820 else { /* some multicasts */
9821 if (CHIP_IS_E1(bp)) {
9823 struct dev_mc_list *mclist;
9824 struct mac_configuration_cmd *config =
9825 bnx2x_sp(bp, mcast_config);
9827 for (i = 0, mclist = dev->mc_list;
9828 mclist && (i < dev->mc_count);
9829 i++, mclist = mclist->next) {
9831 config->config_table[i].
9832 cam_entry.msb_mac_addr =
9833 swab16(*(u16 *)&mclist->dmi_addr[0]);
9834 config->config_table[i].
9835 cam_entry.middle_mac_addr =
9836 swab16(*(u16 *)&mclist->dmi_addr[2]);
9837 config->config_table[i].
9838 cam_entry.lsb_mac_addr =
9839 swab16(*(u16 *)&mclist->dmi_addr[4]);
9840 config->config_table[i].cam_entry.flags =
9842 config->config_table[i].
9843 target_table_entry.flags = 0;
9844 config->config_table[i].
9845 target_table_entry.client_id = 0;
9846 config->config_table[i].
9847 target_table_entry.vlan_id = 0;
9850 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9851 config->config_table[i].
9852 cam_entry.msb_mac_addr,
9853 config->config_table[i].
9854 cam_entry.middle_mac_addr,
9855 config->config_table[i].
9856 cam_entry.lsb_mac_addr);
9858 old = config->hdr.length_6b;
9860 for (; i < old; i++) {
9861 if (CAM_IS_INVALID(config->
9863 i--; /* already invalidated */
9867 CAM_INVALIDATE(config->
9872 if (CHIP_REV_IS_SLOW(bp))
9873 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9875 offset = BNX2X_MAX_MULTICAST*(1 + port);
9877 config->hdr.length_6b = i;
9878 config->hdr.offset = offset;
9879 config->hdr.client_id = BP_CL_ID(bp);
9880 config->hdr.reserved1 = 0;
9882 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9883 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9884 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9887 /* Accept one or more multicasts */
9888 struct dev_mc_list *mclist;
9889 u32 mc_filter[MC_HASH_SIZE];
9890 u32 crc, bit, regidx;
9893 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9895 for (i = 0, mclist = dev->mc_list;
9896 mclist && (i < dev->mc_count);
9897 i++, mclist = mclist->next) {
9899 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9902 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9903 bit = (crc >> 24) & 0xff;
9906 mc_filter[regidx] |= (1 << bit);
9909 for (i = 0; i < MC_HASH_SIZE; i++)
9910 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9915 bp->rx_mode = rx_mode;
9916 bnx2x_set_storm_rx_mode(bp);
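/* Hash sketch (restating the multicast branch above; the elided index
 * split is assumed to be regidx = bit >> 5, bit &= 0x1f): the top byte
 * of crc32c_le() over the 6-byte MAC selects one of 256 filter bits,
 * stored across the MC_HASH_SIZE (8) 32-bit MC_HASH registers as
 * mc_filter[bit >> 5] |= 1 << (bit & 0x1f); a multicast frame is then
 * accepted when its hashed bit is set. */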
9919 /* called with rtnl_lock */
9920 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9922 struct sockaddr *addr = p;
9923 struct bnx2x *bp = netdev_priv(dev);
9925 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9928 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9929 if (netif_running(dev)) {
9931 bnx2x_set_mac_addr_e1(bp, 1);
9933 bnx2x_set_mac_addr_e1h(bp, 1);
9939 /* called with rtnl_lock */
9940 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9942 struct mii_ioctl_data *data = if_mii(ifr);
9943 struct bnx2x *bp = netdev_priv(dev);
9944 int port = BP_PORT(bp);
9949 data->phy_id = bp->port.phy_addr;
9956 if (!netif_running(dev))
9959 mutex_lock(&bp->port.phy_mutex);
9960 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9961 DEFAULT_PHY_DEV_ADDR,
9962 (data->reg_num & 0x1f), &mii_regval);
9963 data->val_out = mii_regval;
9964 mutex_unlock(&bp->port.phy_mutex);
9969 if (!capable(CAP_NET_ADMIN))
9972 if (!netif_running(dev))
9975 mutex_lock(&bp->port.phy_mutex);
9976 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9977 DEFAULT_PHY_DEV_ADDR,
9978 (data->reg_num & 0x1f), data->val_in);
9979 mutex_unlock(&bp->port.phy_mutex);
9990 /* called with rtnl_lock */
9991 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9993 struct bnx2x *bp = netdev_priv(dev);
9996 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9997 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10000 /* This does not race with packet allocation
10001 * because the actual alloc size is
10002 * only updated as part of load
10004 dev->mtu = new_mtu;
10006 if (netif_running(dev)) {
10007 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10008 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10014 static void bnx2x_tx_timeout(struct net_device *dev)
10016 struct bnx2x *bp = netdev_priv(dev);
10018 #ifdef BNX2X_STOP_ON_ERROR
10022 /* This allows the netif to be shut down gracefully before resetting */
10023 schedule_work(&bp->reset_task);
10027 /* called with rtnl_lock */
10028 static void bnx2x_vlan_rx_register(struct net_device *dev,
10029 struct vlan_group *vlgrp)
10031 struct bnx2x *bp = netdev_priv(dev);
10035 /* Set flags according to the required capabilities */
10036 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10038 if (dev->features & NETIF_F_HW_VLAN_TX)
10039 bp->flags |= HW_VLAN_TX_FLAG;
10041 if (dev->features & NETIF_F_HW_VLAN_RX)
10042 bp->flags |= HW_VLAN_RX_FLAG;
10044 if (netif_running(dev))
10045 bnx2x_set_client_config(bp);
10050 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10051 static void poll_bnx2x(struct net_device *dev)
10053 struct bnx2x *bp = netdev_priv(dev);
10055 disable_irq(bp->pdev->irq);
10056 bnx2x_interrupt(bp->pdev->irq, dev);
10057 enable_irq(bp->pdev->irq);
10061 static const struct net_device_ops bnx2x_netdev_ops = {
10062 .ndo_open = bnx2x_open,
10063 .ndo_stop = bnx2x_close,
10064 .ndo_start_xmit = bnx2x_start_xmit,
10065 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10066 .ndo_set_mac_address = bnx2x_change_mac_addr,
10067 .ndo_validate_addr = eth_validate_addr,
10068 .ndo_do_ioctl = bnx2x_ioctl,
10069 .ndo_change_mtu = bnx2x_change_mtu,
10070 .ndo_tx_timeout = bnx2x_tx_timeout,
10072 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10074 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10075 .ndo_poll_controller = poll_bnx2x,
10080 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10081 struct net_device *dev)
10086 SET_NETDEV_DEV(dev, &pdev->dev);
10087 bp = netdev_priv(dev);
10092 bp->func = PCI_FUNC(pdev->devfn);
10094 rc = pci_enable_device(pdev);
10096 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10100 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10101 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10104 goto err_out_disable;
10107 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10108 printk(KERN_ERR PFX "Cannot find second PCI device"
10109 " base address, aborting\n");
10111 goto err_out_disable;
10114 if (atomic_read(&pdev->enable_cnt) == 1) {
10115 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10117 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10119 goto err_out_disable;
10122 pci_set_master(pdev);
10123 pci_save_state(pdev);
10126 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10127 if (bp->pm_cap == 0) {
10128 printk(KERN_ERR PFX "Cannot find power management"
10129 " capability, aborting\n");
10131 goto err_out_release;
10134 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10135 if (bp->pcie_cap == 0) {
10136 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10139 goto err_out_release;
10142 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10143 bp->flags |= USING_DAC_FLAG;
10144 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10145 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10146 " failed, aborting\n");
10148 goto err_out_release;
10151 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10152 printk(KERN_ERR PFX "System does not support DMA,"
10155 goto err_out_release;
10158 dev->mem_start = pci_resource_start(pdev, 0);
10159 dev->base_addr = dev->mem_start;
10160 dev->mem_end = pci_resource_end(pdev, 0);
10162 dev->irq = pdev->irq;
10164 bp->regview = pci_ioremap_bar(pdev, 0);
10165 if (!bp->regview) {
10166 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10168 goto err_out_release;
10171 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10172 min_t(u64, BNX2X_DB_SIZE,
10173 pci_resource_len(pdev, 2)));
10174 if (!bp->doorbells) {
10175 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10177 goto err_out_unmap;
10180 bnx2x_set_power_state(bp, PCI_D0);
10182 /* clean indirect addresses */
10183 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10184 PCICFG_VENDOR_ID_OFFSET);
10185 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10186 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10187 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10188 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10190 dev->watchdog_timeo = TX_TIMEOUT;
10192 dev->netdev_ops = &bnx2x_netdev_ops;
10193 dev->ethtool_ops = &bnx2x_ethtool_ops;
10194 dev->features |= NETIF_F_SG;
10195 dev->features |= NETIF_F_HW_CSUM;
10196 if (bp->flags & USING_DAC_FLAG)
10197 dev->features |= NETIF_F_HIGHDMA;
10199 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10200 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10202 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10203 dev->features |= NETIF_F_TSO6;
10209 iounmap(bp->regview);
10210 bp->regview = NULL;
10212 if (bp->doorbells) {
10213 iounmap(bp->doorbells);
10214 bp->doorbells = NULL;
10218 if (atomic_read(&pdev->enable_cnt) == 1)
10219 pci_release_regions(pdev);
10222 pci_disable_device(pdev);
10223 pci_set_drvdata(pdev, NULL);
10229 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10231 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10233 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10237 /* return value of 1=2.5GHz 2=5GHz */
10238 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10240 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10242 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
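/* Decode note (a reading of the two helpers above): PCICFG_LINK_CONTROL
 * mirrors the PCIe Link Status fields, so the width is the negotiated
 * lane count (x1..x16) and the speed is the encoded generation
 * (1 = 2.5GT/s, 2 = 5GT/s), exactly as printed by bnx2x_init_one()
 * below. */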
10246 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10247 const struct pci_device_id *ent)
10249 static int version_printed;
10250 struct net_device *dev = NULL;
10254 if (version_printed++ == 0)
10255 printk(KERN_INFO "%s", version);
10257 /* dev zeroed in init_etherdev */
10258 dev = alloc_etherdev(sizeof(*bp));
10260 printk(KERN_ERR PFX "Cannot allocate net device\n");
10264 bp = netdev_priv(dev);
10265 bp->msglevel = debug;
10267 rc = bnx2x_init_dev(pdev, dev);
10273 rc = register_netdev(dev);
10275 dev_err(&pdev->dev, "Cannot register net device\n");
10276 goto init_one_exit;
10279 pci_set_drvdata(pdev, dev);
10281 rc = bnx2x_init_bp(bp);
10283 unregister_netdev(dev);
10284 goto init_one_exit;
10287 netif_carrier_off(dev);
10289 bp->common.name = board_info[ent->driver_data].name;
10290 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10291 " IRQ %d, ", dev->name, bp->common.name,
10292 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10293 bnx2x_get_pcie_width(bp),
10294 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10295 dev->base_addr, bp->pdev->irq);
10296 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10301 iounmap(bp->regview);
10304 iounmap(bp->doorbells);
10308 if (atomic_read(&pdev->enable_cnt) == 1)
10309 pci_release_regions(pdev);
10311 pci_disable_device(pdev);
10312 pci_set_drvdata(pdev, NULL);
10317 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10319 struct net_device *dev = pci_get_drvdata(pdev);
10323 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10326 bp = netdev_priv(dev);
10328 unregister_netdev(dev);
10331 iounmap(bp->regview);
10334 iounmap(bp->doorbells);
10338 if (atomic_read(&pdev->enable_cnt) == 1)
10339 pci_release_regions(pdev);
10341 pci_disable_device(pdev);
10342 pci_set_drvdata(pdev, NULL);
10345 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10347 struct net_device *dev = pci_get_drvdata(pdev);
10351 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10354 bp = netdev_priv(dev);
10358 pci_save_state(pdev);
10360 if (!netif_running(dev)) {
10365 netif_device_detach(dev);
10367 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10369 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10376 static int bnx2x_resume(struct pci_dev *pdev)
10378 struct net_device *dev = pci_get_drvdata(pdev);
10383 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10386 bp = netdev_priv(dev);
10390 pci_restore_state(pdev);
10392 if (!netif_running(dev)) {
10397 bnx2x_set_power_state(bp, PCI_D0);
10398 netif_device_attach(dev);
10400 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10407 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10411 bp->state = BNX2X_STATE_ERROR;
10413 bp->rx_mode = BNX2X_RX_MODE_NONE;
10415 bnx2x_netif_stop(bp, 0);
10417 del_timer_sync(&bp->timer);
10418 bp->stats_state = STATS_STATE_DISABLED;
10419 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10422 bnx2x_free_irq(bp);
10424 if (CHIP_IS_E1(bp)) {
10425 struct mac_configuration_cmd *config =
10426 bnx2x_sp(bp, mcast_config);
10428 for (i = 0; i < config->hdr.length_6b; i++)
10429 CAM_INVALIDATE(config->config_table[i]);
10432 /* Free SKBs, SGEs, TPA pool and driver internals */
10433 bnx2x_free_skbs(bp);
10434 for_each_queue(bp, i)
10435 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10436 bnx2x_free_mem(bp);
10438 bp->state = BNX2X_STATE_CLOSED;
10440 netif_carrier_off(bp->dev);
10445 static void bnx2x_eeh_recover(struct bnx2x *bp)
10449 mutex_init(&bp->port.phy_mutex);
10451 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10452 bp->link_params.shmem_base = bp->common.shmem_base;
10453 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10455 if (!bp->common.shmem_base ||
10456 (bp->common.shmem_base < 0xA0000) ||
10457 (bp->common.shmem_base >= 0xC0000)) {
10458 BNX2X_DEV_INFO("MCP not active\n");
10459 bp->flags |= NO_MCP_FLAG;
10463 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10464 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10465 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10466 BNX2X_ERR("BAD MCP validity signature\n");
10468 if (!BP_NOMCP(bp)) {
10469 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10470 & DRV_MSG_SEQ_NUMBER_MASK);
10471 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10476 * bnx2x_io_error_detected - called when PCI error is detected
10477 * @pdev: Pointer to PCI device
10478 * @state: The current pci connection state
10480 * This function is called after a PCI bus error affecting
10481 * this device has been detected.
10483 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10484 pci_channel_state_t state)
10486 struct net_device *dev = pci_get_drvdata(pdev);
10487 struct bnx2x *bp = netdev_priv(dev);
10491 netif_device_detach(dev);
10493 if (netif_running(dev))
10494 bnx2x_eeh_nic_unload(bp);
10496 pci_disable_device(pdev);
10500 /* Request a slot reset */
10501 return PCI_ERS_RESULT_NEED_RESET;
10505 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10506 * @pdev: Pointer to PCI device
10508 * Restart the card from scratch, as if from a cold-boot.
10510 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10512 struct net_device *dev = pci_get_drvdata(pdev);
10513 struct bnx2x *bp = netdev_priv(dev);
10517 if (pci_enable_device(pdev)) {
10518 dev_err(&pdev->dev,
10519 "Cannot re-enable PCI device after reset\n");
10521 return PCI_ERS_RESULT_DISCONNECT;
10524 pci_set_master(pdev);
10525 pci_restore_state(pdev);
10527 if (netif_running(dev))
10528 bnx2x_set_power_state(bp, PCI_D0);
10532 return PCI_ERS_RESULT_RECOVERED;
10536 * bnx2x_io_resume - called when traffic can start flowing again
10537 * @pdev: Pointer to PCI device
10539 * This callback is called when the error recovery driver tells us that
10540 * it's OK to resume normal operation.
10542 static void bnx2x_io_resume(struct pci_dev *pdev)
10544 struct net_device *dev = pci_get_drvdata(pdev);
10545 struct bnx2x *bp = netdev_priv(dev);
10549 bnx2x_eeh_recover(bp);
10551 if (netif_running(dev))
10552 bnx2x_nic_load(bp, LOAD_NORMAL);
10554 netif_device_attach(dev);
10559 static struct pci_error_handlers bnx2x_err_handler = {
10560 .error_detected = bnx2x_io_error_detected,
10561 .slot_reset = bnx2x_io_slot_reset,
10562 .resume = bnx2x_io_resume,
10565 static struct pci_driver bnx2x_pci_driver = {
10566 .name = DRV_MODULE_NAME,
10567 .id_table = bnx2x_pci_tbl,
10568 .probe = bnx2x_init_one,
10569 .remove = __devexit_p(bnx2x_remove_one),
10570 .suspend = bnx2x_suspend,
10571 .resume = bnx2x_resume,
10572 .err_handler = &bnx2x_err_handler,
10575 static int __init bnx2x_init(void)
10577 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10578 if (bnx2x_wq == NULL) {
10579 printk(KERN_ERR PFX "Cannot create workqueue\n");
10583 return pci_register_driver(&bnx2x_pci_driver);
10586 static void __exit bnx2x_cleanup(void)
10588 pci_unregister_driver(&bnx2x_pci_driver);
10590 destroy_workqueue(bnx2x_wq);
10593 module_init(bnx2x_init);
10594 module_exit(bnx2x_cleanup);