1 /* bnx2x.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 /* define this to make the driver freeze on error
19 * to allow getting debug info
20 * (you will need to reboot afterwards)
22 /*#define BNX2X_STOP_ON_ERROR*/
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kernel.h>
27 #include <linux/device.h> /* for dev_info() */
28 #include <linux/timer.h>
29 #include <linux/errno.h>
30 #include <linux/ioport.h>
31 #include <linux/slab.h>
32 #include <linux/vmalloc.h>
33 #include <linux/interrupt.h>
34 #include <linux/pci.h>
35 #include <linux/init.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/bitops.h>
41 #include <linux/irq.h>
42 #include <linux/delay.h>
43 #include <asm/byteorder.h>
44 #include <linux/time.h>
45 #include <linux/ethtool.h>
46 #include <linux/mii.h>
47 #ifdef NETIF_F_HW_VLAN_TX
48 #include <linux/if_vlan.h>
53 #include <net/checksum.h>
54 #include <linux/workqueue.h>
55 #include <linux/crc32.h>
56 #include <linux/prefetch.h>
57 #include <linux/zlib.h>
58 #include <linux/version.h>
61 #include "bnx2x_reg.h"
62 #include "bnx2x_fw_defs.h"
63 #include "bnx2x_hsi.h"
64 #include "bnx2x_link.h"
66 #include "bnx2x_init.h"
/* Driver version/date strings and the minimum supported bootcode version. */
68 #define DRV_MODULE_VERSION "1.42.4"
69 #define DRV_MODULE_RELDATE "2008/4/9"
70 #define BNX2X_BC_VER 0x040200
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT (5*HZ)
/* Banner printed once at module load (see version usage elsewhere). */
75 static char version[] __devinitdata =
76 "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
/* Module parameters (debug/bring-up knobs); the backing int variables are
 * declared in a part of the file not shown here.
 */
91 module_param(use_inta, int, 0);
92 module_param(poll, int, 0);
93 module_param(onefunc, int, 0);
94 module_param(debug, int, 0);
95 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
96 MODULE_PARM_DESC(poll, "use polling (for debug)");
97 MODULE_PARM_DESC(onefunc, "enable only first function");
98 MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
99 MODULE_PARM_DESC(debug, "default debug msglevel");
102 module_param(use_multi, int, 0);
103 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
/* Supported board types; enum members are elided in this excerpt. */
106 enum bnx2x_board_type {
110 /* indexed by board_t, above */
113 } board_info[] __devinitdata = {
114 { "Broadcom NetXtreme II BCM57710 XGb" }
/* PCI IDs this driver binds to; driver_data carries the board_info index. */
117 static const struct pci_device_id bnx2x_pci_tbl[] = {
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
123 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
125 /****************************************************************************
126 * General service functions
127 ****************************************************************************/
130 * locking is done by mcp
/* Indirect register write through PCI config space: program the GRC address
 * window, write the value, then park the window back at the benign
 * vendor-ID offset so stray config accesses cannot hit device registers.
 */
132 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
134 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
137 PCICFG_VENDOR_ID_OFFSET);
/* Indirect register read, the read counterpart of bnx2x_reg_wr_ind():
 * select @addr via the GRC window, read the data dword, restore the window.
 * (The local 'val' declaration and return are outside this excerpt.)
 */
141 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
/* Per-channel DMAE "GO" doorbell registers, indexed by DMAE command
 * channel number (0..15); used by bnx2x_post_dmae() to kick a command.
 */
154 static const u32 dmae_reg_go_c[] = {
155 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
156 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
157 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
158 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
161 /* copy command into DMAE command memory and set DMAE command go */
/* copy command into DMAE command memory and set DMAE command go */
/* Copies @dmae dword-by-dword into the command slot for channel @idx,
 * then writes 1 to that channel's GO register to start the transfer.
 */
162 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
168 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
169 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
170 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172 /* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
173 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
175 REG_WR(bp, dmae_reg_go_c[idx], 1);
/* DMA @len32 dwords from host memory at @dma_addr into device GRC space at
 * byte address @dst_addr.  Builds a PCI->GRC DMAE command with a PCI
 * completion write-back, posts it on the port's channel, and busy-polls the
 * slowpath wb_comp word until the device writes BNX2X_WB_COMP_VAL.
 */
178 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
179 u32 dst_addr, u32 len32)
181 struct dmae_command *dmae = &bp->dmae;
183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
186 memset(dmae, 0, sizeof(struct dmae_command));
/* Endianity flag is selected by a build-time #ifdef (arms elided here). */
188 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
189 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
190 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
192 DMAE_CMD_ENDIANITY_B_DW_SWAP |
194 DMAE_CMD_ENDIANITY_DW_SWAP |
196 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
197 dmae->src_addr_lo = U64_LO(dma_addr);
198 dmae->src_addr_hi = U64_HI(dma_addr);
/* GRC destination is programmed in dword units, hence the >> 2. */
199 dmae->dst_addr_lo = dst_addr >> 2;
200 dmae->dst_addr_hi = 0;
202 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
203 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
204 dmae->comp_val = BNX2X_WB_COMP_VAL;
207 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
208 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
209 "dst_addr [%x:%08x (%08x)]\n"
210 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
211 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
212 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
213 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
216 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
217 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
218 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
223 bnx2x_post_dmae(bp, dmae, port * 8)
226 /* adjust timeout for emulation/FPGA */
227 if (CHIP_REV_IS_SLOW(bp))
/* Poll until the device's completion write-back lands in host memory. */
229 while (*wb_comp != BNX2X_WB_COMP_VAL) {
230 /* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
233 BNX2X_ERR("dmae timeout!\n");
/* DMA @len32 dwords from device GRC space at byte address @src_addr into the
 * slowpath wb_data buffer (cleared first).  Mirror image of
 * bnx2x_write_dmae(): GRC->PCI direction, same wb_comp polling for
 * completion.
 */
240 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
242 struct dmae_command *dmae = &bp->dmae;
244 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
247 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
248 memset(dmae, 0, sizeof(struct dmae_command));
/* Endianity flag is selected by a build-time #ifdef (arms elided here). */
250 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
251 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
252 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
254 DMAE_CMD_ENDIANITY_B_DW_SWAP |
256 DMAE_CMD_ENDIANITY_DW_SWAP |
258 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
/* GRC source is programmed in dword units, hence the >> 2. */
259 dmae->src_addr_lo = src_addr >> 2;
260 dmae->src_addr_hi = 0;
261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
262 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
264 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
265 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
266 dmae->comp_val = BNX2X_WB_COMP_VAL;
269 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
270 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
271 "dst_addr [%x:%08x (%08x)]\n"
272 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
273 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
274 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
275 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
280 bnx2x_post_dmae(bp, dmae, port * 8)
283 while (*wb_comp != BNX2X_WB_COMP_VAL) {
286 BNX2X_ERR("dmae timeout!\n");
292 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
293 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
294 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* Dump firmware assert entries from the four storm processors (X/T/C/U per
 * the 'storm' label array).  For each storm it reads the assert-list index,
 * then walks the assert array printing each valid (non-INVALID-opcode)
 * 4-dword entry.  NOTE(review): only XSTORM_* offset macros are visible in
 * this excerpt; presumably the per-storm base comes from intmem_base[] --
 * confirm against the full source.
 */
298 static int bnx2x_mc_assert(struct bnx2x *bp)
302 const char storm[] = {"XTCU"};
303 const u32 intmem_base[] = {
310 /* Go through all instances of all SEMIs */
311 for (i = 0; i < 4; i++) {
312 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
315 BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
318 /* print the asserts */
319 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
320 u32 row0, row1, row2, row3;
322 row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
324 row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
326 row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
328 row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
/* An entry is valid until the first INVALID opcode marker. */
331 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
332 BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
333 " 0x%08x 0x%08x 0x%08x 0x%08x\n",
334 storm[i], j, row3, row2, row1, row0);
/* Print the firmware (MCP) trace buffer from scratchpad memory.  The current
 * write mark is read from scratch offset 0xf104 and rounded up to a dword;
 * the buffer is then dumped in two wraps: from the mark to the end (0xF900)
 * and from the start (0xF108) up to the mark, 8 dwords at a time, printed as
 * raw text.  The 0x08000000 offset converts the stored mark to a scratchpad-
 * relative offset.
 */
344 static void bnx2x_fw_dump(struct bnx2x *bp)
350 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
351 mark = ((mark + 0x3) & ~0x3);
352 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
354 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
355 for (word = 0; word < 8; word++)
/* htonl: firmware stores the trace text big-endian. */
356 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
359 printk(KERN_CONT "%s", (char *)data);
361 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
362 for (word = 0; word < 8; word++)
363 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
366 printk(KERN_CONT "%s", (char *)data);
368 printk("\n" KERN_ERR PFX "end of fw dump\n");
/* Crash-time state dump: for every queue print the TX/RX producer/consumer
 * indices, then a window of TX buffers, TX BDs, RX BDs and RX completion
 * entries around the current consumers; finally print the default status
 * block indices and disable statistics collection.
 */
371 static void bnx2x_panic_dump(struct bnx2x *bp)
376 BNX2X_ERR("begin crash dump -----------------\n");
378 for_each_queue(bp, i) {
379 struct bnx2x_fastpath *fp = &bp->fp[i];
380 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
382 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
383 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
384 " *rx_cons_sb(%x) rx_comp_prod(%x)"
385 " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
387 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
388 fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
389 fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
390 fp->fp_u_idx, hw_prods->packets_prod,
/* TX packet buffers: 10 entries before the SB consumer, 245 after. */
393 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
394 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
395 for (j = start; j < end; j++) {
396 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
398 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
399 sw_bd->skb, sw_bd->first_bd);
/* Raw TX buffer descriptors around the BD consumer. */
402 start = TX_BD(fp->tx_bd_cons - 10);
403 end = TX_BD(fp->tx_bd_cons + 254);
404 for (j = start; j < end; j++) {
405 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
407 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
408 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
/* RX buffer descriptors around the RX SB consumer. */
411 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
412 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
413 for (j = start; j < end; j++) {
414 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
415 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
417 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
418 j, rx_bd[0], rx_bd[1], sw_bd->skb);
/* RX completion queue entries around the completion consumer. */
421 start = RCQ_BD(fp->rx_comp_cons - 10);
422 end = RCQ_BD(fp->rx_comp_cons + 503);
423 for (j = start; j < end; j++) {
424 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
426 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
427 j, cqe[0], cqe[1], cqe[2], cqe[3]);
431 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
432 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
433 " spq_prod_idx(%u)\n",
434 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
435 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
439 BNX2X_ERR("end crash dump -----------------\n");
/* Stop the statistics state machine so it doesn't touch dying HW. */
441 bp->stats_state = STATS_STATE_DISABLE;
442 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
/* Enable host-coalescing interrupts for this port.  For MSI-X: clear
 * single-ISR mode and enable MSI/MSI-X + attention bits; for INTA: enable
 * single-ISR, MSI/MSI-X, INT line and attention bits.  The value is written
 * twice with MSI_MSIX cleared in between as the Errata A0.158 workaround.
 */
445 static void bnx2x_int_enable(struct bnx2x *bp)
448 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
449 u32 val = REG_RD(bp, addr);
450 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
453 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
454 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
455 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
457 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
458 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
459 HC_CONFIG_0_REG_INT_LINE_EN_0 |
460 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
462 /* Errata A0.158 workaround */
463 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
464 val, port, addr, msix);
466 REG_WR(bp, addr, val);
468 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
471 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
472 val, port, addr, msix);
474 REG_WR(bp, addr, val);
/* Disable all host-coalescing interrupt sources for this port by clearing
 * the enable bits in HC_REG_CONFIG_{0,1}, then read back to verify the
 * write actually reached the IGU.
 */
477 static void bnx2x_int_disable(struct bnx2x *bp)
480 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
481 u32 val = REG_RD(bp, addr);
483 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
484 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
485 HC_CONFIG_0_REG_INT_LINE_EN_0 |
486 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
488 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
491 REG_WR(bp, addr, val);
/* Read-back check: catches posted writes that never landed. */
492 if (REG_RD(bp, addr) != val)
493 BNX2X_ERR("BUG! proper val not read from IGU!\n")
/* Disable interrupts and wait for all in-flight handlers to finish:
 * bump intr_sem so shared-IRQ handlers bail out, mask the HW, synchronize
 * every MSI-X vector (fastpath queues + slowpath) or the single INTA line,
 * and cancel any pending slowpath work.
 */
496 static void bnx2x_int_disable_sync(struct bnx2x *bp)
499 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
502 atomic_inc(&bp->intr_sem);
503 /* prevent the HW from sending interrupts */
504 bnx2x_int_disable(bp);
506 /* make sure all ISRs are done */
508 for_each_queue(bp, i)
509 synchronize_irq(bp->msix_table[i].vector);
511 /* one more for the Slow Path IRQ */
512 synchronize_irq(bp->msix_table[i].vector);
514 synchronize_irq(bp->pdev->irq);
516 /* make sure sp_task is not running */
517 cancel_work_sync(&bp->sp_task);
524 * general service functions
/* Acknowledge a status block to the IGU: build an igu_ack_register with the
 * status-block id/storm/index, the update flag and the interrupt-mode op,
 * and write it as a single dword to the port's INT_ACK address.
 */
527 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
528 u8 storm, u16 index, u8 op, u8 update)
530 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
531 struct igu_ack_register igu_ack;
533 igu_ack.status_block_index = index;
534 igu_ack.sb_id_and_flags =
535 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
536 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
537 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
538 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
540 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
541 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
542 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
/* Refresh the cached fastpath status-block indices (CSTORM and USTORM) from
 * the chip-written status block.  Presumably returns a changed/rc flag set
 * in the elided lines -- TODO confirm against the full source.
 */
545 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
547 struct host_status_block *fpsb = fp->status_blk;
550 barrier(); /* status block is written to by the chip */
551 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
552 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
555 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
556 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
/* Check whether this fastpath has pending RX completions or TX completions
 * to process, i.e. whether the status-block consumer indices differ from
 * our cached ones.
 */
562 static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
564 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
/* Skip the "next page" element at the end of an RCQ page. */
566 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
569 if ((rx_cons_sb != fp->rx_comp_cons) ||
570 (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
/* Read (and thereby acknowledge) the IGU SIMD interrupt mask for this port;
 * the returned bits identify which status blocks need servicing.  The
 * IGU_DEBUG section is compiled in only when debugging.
 */
576 static u16 bnx2x_ack_int(struct bnx2x *bp)
578 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
579 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
581 /* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
582 result, BAR_IGU_INTMEM + igu_addr); */
585 #warning IGU_DEBUG active
587 BNX2X_ERR("read %x from IGU\n", result);
588 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
596 * fast path service functions
599 /* free skb in the packet ring at pos idx
600 * return idx of last bd freed
/* Free the skb and unmap every BD of the transmitted packet at ring position
 * @idx.  Walks the BD chain: unmaps the first (single-mapped) BD, skips the
 * optional parse BD and TSO split-header BD (which carry no DMA mapping),
 * then unmaps each fragment BD.  Returns the index of the last BD freed
 * (return statement elided in this excerpt).
 */
602 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
605 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
606 struct eth_tx_bd *tx_bd;
607 struct sk_buff *skb = tx_buf->skb;
608 u16 bd_idx = tx_buf->first_bd;
611 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
615 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
616 tx_bd = &fp->tx_desc_ring[bd_idx];
617 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
618 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
/* nbd counts all BDs of the packet; the first one was just handled. */
620 nbd = le16_to_cpu(tx_bd->nbd) - 1;
621 #ifdef BNX2X_STOP_ON_ERROR
622 if (nbd > (MAX_SKB_FRAGS + 2)) {
623 BNX2X_ERR("bad nbd!\n");
628 /* Skip a parse bd and the TSO split header bd
629 since they have no mapping */
631 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
633 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
634 ETH_TX_BD_FLAGS_TCP_CSUM |
635 ETH_TX_BD_FLAGS_SW_LSO)) {
637 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
638 tx_bd = &fp->tx_desc_ring[bd_idx];
639 /* is this a TSO split header bd? */
640 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
642 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* Remaining BDs are page fragments: unmap each in turn. */
649 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
650 tx_bd = &fp->tx_desc_ring[bd_idx];
651 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
652 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
654 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
660 tx_buf->first_bd = 0;
/* Number of TX BDs still available on this fastpath ring.  Computes 'used'
 * from producer/consumer distance, compensating for the per-page "next"
 * elements (NUM_TX_BD - NUM_TX_RINGS usable entries, TX_DESC_CNT per page).
 */
666 static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
672 /* Tell compiler that prod and cons can change */
674 prod = fp->tx_bd_prod;
675 cons = fp->tx_bd_cons;
677 used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
678 (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
681 /* used = prod - cons - prod/size + cons/size */
682 used -= NUM_TX_BD - NUM_TX_RINGS;
/* Sanity bounds on the computed fill level (debug-only traps). */
685 BUG_TRAP(used <= fp->bp->tx_ring_size);
686 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
688 return (fp->bp->tx_ring_size - used);
/* TX completion processing: walk from the software packet consumer to the
 * hardware consumer reported in the status block, freeing each completed
 * packet, then update the cached consumers and wake the netdev queue if it
 * was stopped and enough BDs are now free.
 */
691 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
693 struct bnx2x *bp = fp->bp;
694 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
697 #ifdef BNX2X_STOP_ON_ERROR
698 if (unlikely(bp->panic))
702 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
703 sw_cons = fp->tx_pkt_cons;
705 while (sw_cons != hw_cons) {
708 pkt_cons = TX_BD(sw_cons);
710 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
712 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
713 hw_cons, sw_cons, pkt_cons);
715 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
717 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
720 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
728 fp->tx_pkt_cons = sw_cons;
729 fp->tx_bd_cons = bd_cons;
731 /* Need to make the tx_cons update visible to start_xmit()
732 * before checking for netif_queue_stopped(). Without the
733 * memory barrier, there is a small possibility that start_xmit()
734 * will miss it and cause the queue to be stopped forever.
738 /* TBD need a thresh? */
739 if (unlikely(netif_queue_stopped(bp->dev))) {
/* Re-check under the TX lock to avoid racing with start_xmit(). */
741 netif_tx_lock(bp->dev);
743 if (netif_queue_stopped(bp->dev) &&
744 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
745 netif_wake_queue(bp->dev);
747 netif_tx_unlock(bp->dev);
/* Handle a slowpath (ramrod completion) CQE received on a fastpath ring.
 * Decodes the connection id and command, then advances either the fastpath
 * state machine (multi-queue setup/halt) or the global bp->state machine
 * (port setup, halt, CFC delete, set-MAC).  The mb() makes the state change
 * visible to bnx2x_wait_ramrod() polling on another CPU.
 */
752 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
753 union eth_rx_cqe *rr_cqe)
755 struct bnx2x *bp = fp->bp;
756 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
757 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
759 DP(NETIF_MSG_RX_STATUS,
760 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
761 fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);
/* Per-queue (fastpath) ramrod completions. */
766 switch (command | fp->state) {
767 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
768 BNX2X_FP_STATE_OPENING):
769 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
771 fp->state = BNX2X_FP_STATE_OPEN;
774 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
775 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
777 fp->state = BNX2X_FP_STATE_HALTED;
781 BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
784 mb(); /* force bnx2x_wait_ramrod to see the change */
/* Global (port-level) ramrod completions. */
788 switch (command | bp->state) {
789 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
790 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
791 bp->state = BNX2X_STATE_OPEN;
794 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
795 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
796 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
797 fp->state = BNX2X_FP_STATE_HALTED;
800 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
801 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
803 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
806 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
807 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
810 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
811 DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
815 BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
819 mb(); /* force bnx2x_wait_ramrod to see the change */
/* Allocate and DMA-map a fresh receive skb for ring slot @index, storing the
 * mapping in the software ring entry and the bus address in the hardware BD.
 * Returns 0 on success (error paths are elided in this excerpt).
 */
822 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
823 struct bnx2x_fastpath *fp, u16 index)
826 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
827 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
830 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
831 if (unlikely(skb == NULL))
834 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
836 if (unlikely(dma_mapping_error(mapping))) {
843 pci_unmap_addr_set(rx_buf, mapping, mapping);
845 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
846 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
851 /* note that we are not allocating a new skb,
852 * we are just moving one from cons to prod
853 * we are not creating a new mapping,
854 * so there is no need to check for dma_mapping_error().
/* Recycle an RX skb from the consumer slot to the producer slot: sync the
 * start of the buffer back to the device, then copy the skb pointer and DMA
 * mapping from cons to prod.  No new allocation or mapping is created (see
 * the comment above this function), so no dma_mapping_error() check needed.
 */
856 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
857 struct sk_buff *skb, u16 cons, u16 prod)
859 struct bnx2x *bp = fp->bp;
860 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
861 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
862 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
863 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
865 pci_dma_sync_single_for_device(bp->pdev,
866 pci_unmap_addr(cons_rx_buf, mapping),
867 bp->rx_offset + RX_COPY_THRESH,
870 prod_rx_buf->skb = cons_rx_buf->skb;
871 pci_unmap_addr_set(prod_rx_buf, mapping,
872 pci_unmap_addr(cons_rx_buf, mapping));
/* RX NAPI work loop: consume completion queue entries up to the hardware
 * consumer (bounded by @budget).  Slowpath CQEs are routed to
 * bnx2x_sp_event(); data CQEs yield an skb that is either dropped (error
 * flags), copied (small packet on a jumbo-MTU device), or handed up after a
 * replacement buffer is allocated.  Finally the cached indices are written
 * back and the new RCQ producer is posted to TSTORM.  Returns the number of
 * packets processed (return elided in this excerpt).
 */
876 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
878 struct bnx2x *bp = fp->bp;
879 u16 bd_cons, bd_prod, comp_ring_cons;
880 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
883 #ifdef BNX2X_STOP_ON_ERROR
884 if (unlikely(bp->panic))
/* Skip the "next page" element of the RCQ, as in bnx2x_has_work(). */
888 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
889 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
892 bd_cons = fp->rx_bd_cons;
893 bd_prod = fp->rx_bd_prod;
894 sw_comp_cons = fp->rx_comp_cons;
895 sw_comp_prod = fp->rx_comp_prod;
897 /* Memory barrier necessary as speculative reads of the rx
898 * buffer can be ahead of the index in the status block
902 DP(NETIF_MSG_RX_STATUS,
903 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
904 fp->index, hw_comp_cons, sw_comp_cons);
906 while (sw_comp_cons != hw_comp_cons) {
907 unsigned int len, pad;
908 struct sw_rx_bd *rx_buf;
910 union eth_rx_cqe *cqe;
912 comp_ring_cons = RCQ_BD(sw_comp_cons);
913 bd_prod = RX_BD(bd_prod);
914 bd_cons = RX_BD(bd_cons);
916 cqe = &fp->rx_comp_ring[comp_ring_cons];
918 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
919 " comp_ring (%u) bd_ring (%u,%u)\n",
920 hw_comp_cons, sw_comp_cons,
921 comp_ring_cons, bd_prod, bd_cons);
922 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
923 " queue %x vlan %x len %x\n",
924 cqe->fast_path_cqe.type,
925 cqe->fast_path_cqe.error_type_flags,
926 cqe->fast_path_cqe.status_flags,
927 cqe->fast_path_cqe.rss_hash_result,
928 cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
930 /* is this a slowpath msg? */
931 if (unlikely(cqe->fast_path_cqe.type)) {
932 bnx2x_sp_event(fp, cqe);
935 /* this is an rx packet */
937 rx_buf = &fp->rx_buf_ring[bd_cons];
940 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
941 pad = cqe->fast_path_cqe.placement_offset;
943 pci_dma_sync_single_for_device(bp->pdev,
944 pci_unmap_addr(rx_buf, mapping),
945 pad + RX_COPY_THRESH,
948 prefetch(((char *)(skb)) + 128);
950 /* is this an error packet? */
951 if (unlikely(cqe->fast_path_cqe.error_type_flags &
952 ETH_RX_ERROR_FALGS)) {
953 /* do we sometimes forward error packets anyway? */
955 "ERROR flags(%u) Rx packet(%u)\n",
956 cqe->fast_path_cqe.error_type_flags,
958 /* TBD make sure MC counts this as a drop */
962 /* Since we don't have a jumbo ring
963 * copy small packets if mtu > 1500
965 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
966 (len <= RX_COPY_THRESH)) {
967 struct sk_buff *new_skb;
969 new_skb = netdev_alloc_skb(bp->dev,
971 if (new_skb == NULL) {
973 "ERROR packet dropped "
974 "because of alloc failure\n");
975 /* TBD count this as a drop? */
980 skb_copy_from_linear_data_offset(skb, pad,
981 new_skb->data + pad, len);
982 skb_reserve(new_skb, pad);
983 skb_put(new_skb, len);
/* Original buffer goes back onto the ring; copy is passed up. */
985 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
989 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
990 pci_unmap_single(bp->pdev,
991 pci_unmap_addr(rx_buf, mapping),
994 skb_reserve(skb, pad);
999 "ERROR packet dropped because "
1000 "of alloc failure\n");
/* Replenish failed: recycle the buffer and drop the packet. */
1002 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1006 skb->protocol = eth_type_trans(skb, bp->dev);
1008 skb->ip_summed = CHECKSUM_NONE;
1009 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1010 skb->ip_summed = CHECKSUM_UNNECESSARY;
1012 /* TBD do we pass bad csum packets in promisc */
/* VLAN-tagged frames go through the hw-accel VLAN receive path. */
1016 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1017 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1018 && (bp->vlgrp != NULL))
1019 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1020 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1023 netif_receive_skb(skb);
1025 bp->dev->last_rx = jiffies;
1030 bd_cons = NEXT_RX_IDX(bd_cons);
1031 bd_prod = NEXT_RX_IDX(bd_prod);
1033 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1034 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1037 if ((rx_pkt == budget))
1041 fp->rx_bd_cons = bd_cons;
1042 fp->rx_bd_prod = bd_prod;
1043 fp->rx_comp_cons = sw_comp_cons;
1044 fp->rx_comp_prod = sw_comp_prod;
/* Tell the USTORM/TSTORM firmware where the new RCQ producer is. */
1046 REG_WR(bp, BAR_TSTRORM_INTMEM +
1047 TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1049 mmiowb(); /* keep prod updates ordered */
1051 fp->rx_pkt += rx_pkt;
/* Per-queue MSI-X interrupt handler: acknowledge the status block with
 * interrupts disabled, prefetch the hot status-block/consumer fields, and
 * schedule NAPI for this fastpath.  Actual work happens in the poll routine.
 */
1057 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1059 struct bnx2x_fastpath *fp = fp_cookie;
1060 struct bnx2x *bp = fp->bp;
1061 struct net_device *dev = bp->dev;
1062 int index = fp->index;
1064 DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
1065 bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1067 #ifdef BNX2X_STOP_ON_ERROR
1068 if (unlikely(bp->panic))
1072 prefetch(fp->rx_cons_sb);
1073 prefetch(fp->tx_cons_sb);
1074 prefetch(&fp->status_blk->c_status_block.status_block_index);
1075 prefetch(&fp->status_blk->u_status_block.status_block_index);
1077 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
/* Legacy INTA / shared interrupt handler.  Acks the IGU and inspects the
 * returned status bits: bails out early if the interrupt is not ours or
 * interrupts are logically disabled (intr_sem), schedules NAPI on queue 0
 * for fastpath work, and schedules sp_task for the slowpath bit (0x1).
 */
1081 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1083 struct net_device *dev = dev_instance;
1084 struct bnx2x *bp = netdev_priv(dev);
1085 u16 status = bnx2x_ack_int(bp);
1087 if (unlikely(status == 0)) {
1088 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1092 DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
1094 #ifdef BNX2X_STOP_ON_ERROR
1095 if (unlikely(bp->panic))
1099 /* Return here if interrupt is shared and is disabled */
1100 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1101 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1106 struct bnx2x_fastpath *fp = &bp->fp[0];
1108 prefetch(fp->rx_cons_sb);
1109 prefetch(fp->tx_cons_sb);
1110 prefetch(&fp->status_blk->c_status_block.status_block_index);
1111 prefetch(&fp->status_blk->u_status_block.status_block_index);
1113 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
/* Bit 0 signals a slowpath event: defer to the sp_task workqueue. */
1120 if (unlikely(status & 0x1)) {
1122 schedule_work(&bp->sp_task);
1129 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
1135 /* end of fast path */
1141 * General service functions
/* Acquire a hardware resource lock shared between drivers/functions.
 * Validates the resource id, checks the lock isn't already held, then
 * retries the acquire for up to 1 second (200 x 5ms) by writing the
 * resource bit to the SET register and reading back the lock status.
 */
1144 static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1147 u32 resource_bit = (1 << resource);
1151 /* Validating that the resource is within range */
1152 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1154 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1155 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1159 /* Validating that the resource is not already taken */
1160 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1161 if (lock_status & resource_bit) {
1162 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1163 lock_status, resource_bit);
1167 /* Try for 1 second every 5ms */
1168 for (cnt = 0; cnt < 200; cnt++) {
1169 /* Try to acquire the lock */
1170 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
1172 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1173 if (lock_status & resource_bit)
1178 DP(NETIF_MSG_HW, "Timeout\n");
/* Release a hardware resource lock taken by bnx2x_hw_lock(): validate the
 * resource id, verify we actually hold the bit, then write it to the
 * control register to clear it.
 */
1182 static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1185 u32 resource_bit = (1 << resource);
1188 /* Validating that the resource is within range */
1189 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1191 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1192 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1196 /* Validating that the resource is currently taken */
1197 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1198 if (!(lock_status & resource_bit)) {
1199 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1200 lock_status, resource_bit);
1204 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
1208 /* HW Lock for shared dual port PHYs */
/* Serialize PHY access: take the software phy_mutex, and additionally the
 * hardware MDIO lock when the external PHY (8072/8073) is shared between
 * the two ports.  Pairs with bnx2x_phy_hw_unlock().
 */
1209 static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1211 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1213 mutex_lock(&bp->phy_mutex);
1215 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1216 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1217 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
/* Release PHY serialization taken by bnx2x_phy_hw_lock(): drop the hardware
 * MDIO lock first (shared 8072/8073 PHYs only), then the phy_mutex.
 */
1220 static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
1222 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1224 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1225 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1226 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1228 mutex_unlock(&bp->phy_mutex);
/* Drive a GPIO pin to @mode (output low / output high / input hi-Z).
 * Accounts for port swapping when computing the pin's bit position, takes
 * the GPIO hardware lock around the read-modify-write of MISC_REG_GPIO,
 * and manipulates the FLOAT/SET/CLR bit groups accordingly.
 */
1231 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1233 /* The GPIO should be swapped if swap register is set and active */
1234 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1235 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
1236 int gpio_shift = gpio_num +
1237 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1238 u32 gpio_mask = (1 << gpio_shift);
1241 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1242 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1246 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1247 /* read GPIO and mask except the float bits */
1248 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1251 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1252 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1253 gpio_num, gpio_shift);
1254 /* clear FLOAT and set CLR */
1255 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1256 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1259 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1260 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1261 gpio_num, gpio_shift);
1262 /* clear FLOAT and set SET */
1263 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1264 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1267 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1268 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1269 gpio_num, gpio_shift);
1271 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1278 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1279 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
/* Drive a shared SPIO pin (valid range SPIO_4..SPIO_7) to @mode, using the
 * same FLOAT/SET/CLR scheme as bnx2x_set_gpio() under the SPIO hardware
 * lock.  SPIO pins are chip-wide, so no port adjustment is needed.
 */
1284 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1286 u32 spio_mask = (1 << spio_num);
1289 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1290 (spio_num > MISC_REGISTERS_SPIO_7)) {
1291 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1295 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1296 /* read SPIO and mask except the float bits */
1297 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1300 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1301 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1302 /* clear FLOAT and set CLR */
1303 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1304 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1307 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1308 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1309 /* clear FLOAT and set SET */
1310 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1311 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1314 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1315 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1317 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1324 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1325 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
/* Translate the negotiated IEEE pause mode (bp->link_vars.ieee_fc)
 * into the ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause bits in
 * bp->advertising.
 * NOTE(review): the ADVERTISED_Pause halves of each expression and
 * the break/default lines fall in gaps of this excerpt.
 */
1330 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1332 switch (bp->link_vars.ieee_fc) {
1333 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1334 bp->advertising &= ~(ADVERTISED_Asym_Pause |
1337 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1338 bp->advertising |= (ADVERTISED_Asym_Pause |
1341 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1342 bp->advertising |= ADVERTISED_Asym_Pause;
1345 bp->advertising &= ~(ADVERTISED_Asym_Pause |
/* Report the current link state to the stack and the kernel log:
 * on link-up set the carrier (only once the device is OPEN) and print
 * speed / duplex / flow-control; on link-down drop the carrier and
 * log the loss.  The printk fragments build one continuous line.
 */
1351 static void bnx2x_link_report(struct bnx2x *bp)
1353 if (bp->link_vars.link_up) {
1354 if (bp->state == BNX2X_STATE_OPEN)
1355 netif_carrier_on(bp->dev);
1356 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1358 printk("%d Mbps ", bp->link_vars.line_speed);
1360 if (bp->link_vars.duplex == DUPLEX_FULL)
1361 printk("full duplex");
1363 printk("half duplex");
1365 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1366 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1367 printk(", receive ");
/* both RX and TX pause active -> "receive & transmit" */
1368 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1369 printk("& transmit ");
1371 printk(", transmit ");
1373 printk("flow control ON");
1377 } else { /* link_down */
1378 netif_carrier_off(bp->dev);
1379 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
/* First-time PHY bring-up: seed link_params with the current MTU,
 * run bnx2x_phy_init() under the PHY hardware lock, report the link
 * if it already came up, and derive the pause advertisement bits.
 * Returns the status from bnx2x_phy_init() (return line hidden in
 * this excerpt).
 */
1383 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1387 /* Initialize link parameters structure variables */
1388 bp->link_params.mtu = bp->dev->mtu;
1390 bnx2x_phy_hw_lock(bp);
1391 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1392 bnx2x_phy_hw_unlock(bp);
1394 if (bp->link_vars.link_up)
1395 bnx2x_link_report(bp);
1397 bnx2x_calc_fc_adv(bp);
/* (Re)program the link with the current link_params, holding the PHY
 * hardware lock around the init call, then refresh the advertised
 * pause bits. */
1401 static void bnx2x_link_set(struct bnx2x *bp)
1403 bnx2x_phy_hw_lock(bp);
1404 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1405 bnx2x_phy_hw_unlock(bp);
1407 bnx2x_calc_fc_adv(bp);
/* Bring the link down: run the link-layer reset under the PHY
 * hardware lock. */
1410 static void bnx2x__link_reset(struct bnx2x *bp)
1412 bnx2x_phy_hw_lock(bp);
1413 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1414 bnx2x_phy_hw_unlock(bp);
/* Self-test hook: run the link test under the PHY hardware lock and
 * return its result (return line hidden in this excerpt). */
1417 static u8 bnx2x_link_test(struct bnx2x *bp)
1421 bnx2x_phy_hw_lock(bp);
1422 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1423 bnx2x_phy_hw_unlock(bp);
1428 /* This function is called upon link interrupt */
/* Re-evaluate link state after a NIG/link attention (under the PHY
 * hardware lock) and report the outcome to the stack/log. */
1429 static void bnx2x_link_attn(struct bnx2x *bp)
1431 bnx2x_phy_hw_lock(bp);
1432 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1433 bnx2x_phy_hw_unlock(bp);
1435 /* indicate link status */
1436 bnx2x_link_report(bp);
/* Refresh the cached link status from the hardware/shmem and report
 * it — a no-op unless the device is fully OPEN. */
1439 static void bnx2x__link_status_update(struct bnx2x *bp)
1441 if (bp->state != BNX2X_STATE_OPEN)
1444 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1446 /* indicate link status */
1447 bnx2x_link_report(bp);
1455 * General service functions
1458 /* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post a single slow-path (ramrod) entry on the SPQ: fill the
 * producer BD with command/CID/data, advance the producer (with ring
 * wrap) and kick the XSTORM producer register.  bp->spq_lock
 * serializes producers; spq_left guards against ring overflow.
 * NOTE(review): gaps hide the return statements, the spq_left
 * decrement and the producer-index advance.
 */
1459 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1460 u32 data_hi, u32 data_lo, int common)
1462 int port = bp->port;
1465 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1466 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
1467 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1468 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1470 #ifdef BNX2X_STOP_ON_ERROR
1471 if (unlikely(bp->panic))
1475 spin_lock(&bp->spq_lock);
1477 if (!bp->spq_left) {
1478 BNX2X_ERR("BUG! SPQ ring full!\n");
1479 spin_unlock(&bp->spq_lock);
1484 /* CID needs port number to be encoded int it */
1485 bp->spq_prod_bd->hdr.conn_and_cmd_data =
1486 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
1488 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
/* 'common' ramrods are flagged so the FW treats them port-wide */
1490 bp->spq_prod_bd->hdr.type |=
1491 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1493 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1494 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
/* wrap the producer back to the ring base past the last BD */
1498 if (bp->spq_prod_bd == bp->spq_last_bd) {
1499 bp->spq_prod_bd = bp->spq;
1500 bp->spq_prod_idx = 0;
1501 DP(NETIF_MSG_TIMER, "end of spq\n");
1508 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
1511 spin_unlock(&bp->spq_lock);
1515 /* acquire split MCP access lock register */
/* Try to grab the MCP split access-lock (GRC 0x9c) by writing bit 31
 * and polling until the hardware reflects it back; fail with an error
 * if the bit never sticks.  Loop bounds/delay and the return lines
 * fall in gaps of this excerpt.
 */
1516 static int bnx2x_lock_alr(struct bnx2x *bp)
1523 for (j = 0; j < i*10; j++) {
1525 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1526 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
/* bit 31 readable means the lock was granted to us */
1527 if (val & (1L << 31))
1533 if (!(val & (1L << 31))) {
1534 BNX2X_ERR("Cannot acquire nvram interface\n");
1542 /* Release split MCP access lock register */
/* Drop the lock taken by bnx2x_lock_alr(); the value written is set
 * up in a line hidden by this excerpt (presumably 0 — verify). */
1543 static void bnx2x_unlock_alr(struct bnx2x *bp)
1547 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
/* Sync the driver's cached default-status-block indices (attention +
 * the four storm indices) with what the chip last wrote.  Returns a
 * bitmask describing which indices changed (the rc-accumulation and
 * return lines are hidden in this excerpt — TODO confirm mapping).
 */
1550 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1552 struct host_def_status_block *def_sb = bp->def_status_blk;
1555 barrier(); /* status block is written to by the chip */
1557 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1558 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1561 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1562 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1565 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1566 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1569 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1570 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1573 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1574 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1581 * slow path service functions
/* Handle newly-asserted attention bits for this port:
 *  1. mask them in the per-function AEU mask and record them in
 *     bp->attn_state;
 *  2. service the hard-wired attentions (NIG link change — with the
 *     NIG interrupt temporarily masked — SW timer, GPIOs, and the
 *     general-attention registers, which are cleared by writing 0);
 *  3. ack the bits to the IGU "attn bits set" address;
 *  4. restore the saved NIG mask.
 */
1584 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1586 int port = bp->port;
1587 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
1588 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1589 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1590 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1591 NIG_REG_MASK_INTERRUPT_PORT0;
/* sanity: an asserted bit must currently be unmasked and not
 * already recorded as asserted */
1593 if (~bp->aeu_mask & (asserted & 0xff))
1594 BNX2X_ERR("IGU ERROR\n");
1595 if (bp->attn_state & asserted)
1596 BNX2X_ERR("IGU ERROR\n");
1598 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
1599 bp->aeu_mask, asserted);
1600 bp->aeu_mask &= ~(asserted & 0xff);
1601 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
1603 REG_WR(bp, aeu_addr, bp->aeu_mask);
1605 bp->attn_state |= asserted;
1607 if (asserted & ATTN_HARD_WIRED_MASK) {
1608 if (asserted & ATTN_NIG_FOR_FUNC) {
1610 /* save nig interrupt mask */
1611 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
1612 REG_WR(bp, nig_int_mask_addr, 0);
1614 bnx2x_link_attn(bp);
1616 /* handle unicore attn? */
1618 if (asserted & ATTN_SW_TIMER_4_FUNC)
1619 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1621 if (asserted & GPIO_2_FUNC)
1622 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1624 if (asserted & GPIO_3_FUNC)
1625 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1627 if (asserted & GPIO_4_FUNC)
1628 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
/* general attentions are cleared by writing 0 to their AEU regs */
1631 if (asserted & ATTN_GENERAL_ATTN_1) {
1632 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1633 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1635 if (asserted & ATTN_GENERAL_ATTN_2) {
1636 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1637 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1639 if (asserted & ATTN_GENERAL_ATTN_3) {
1640 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1641 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1644 if (asserted & ATTN_GENERAL_ATTN_4) {
1645 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1646 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1648 if (asserted & ATTN_GENERAL_ATTN_5) {
1649 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1650 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1652 if (asserted & ATTN_GENERAL_ATTN_6) {
1653 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1654 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1658 } /* if hardwired */
1660 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
1661 asserted, BAR_IGU_INTMEM + igu_addr);
1662 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
1664 /* now set back the mask */
1665 if (asserted & ATTN_NIG_FOR_FUNC)
1666 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
/* Service group-0 deasserted attentions.  The only case handled here
 * is SPIO5 — the fan-failure signal on the BCM957710A1022G board:
 * mask further SPIO5 attentions in the AEU enable register, power the
 * PHY down through GPIO 1/2, record the failure in ext_phy_config,
 * and log a shutdown warning.
 */
1669 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1671 int port = bp->port;
1675 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
/* mask SPIO5 so a flapping fan signal cannot storm us */
1677 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1678 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1680 val = REG_RD(bp, reg_offset);
1681 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1682 REG_WR(bp, reg_offset, val);
1684 BNX2X_ERR("SPIO5 hw attention\n");
1686 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
1687 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
1688 /* Fan failure attention */
1690 /* The PHY reset is controled by GPIO 1 */
1691 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1692 MISC_REGISTERS_GPIO_OUTPUT_LOW);
1693 /* Low power mode is controled by GPIO 2 */
1694 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1695 MISC_REGISTERS_GPIO_OUTPUT_LOW);
1696 /* mark the failure */
1697 bp->link_params.ext_phy_config &=
1698 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1699 bp->link_params.ext_phy_config |=
1700 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1702 dev_info.port_hw_config[port].
1703 external_phy_config,
1704 bp->link_params.ext_phy_config);
1705 /* log the failure */
1706 printk(KERN_ERR PFX "Fan Failure on Network"
1707 " Controller %s has caused the driver to"
1708 " shutdown the card to prevent permanent"
1709 " damage. Please contact Dell Support for"
1710 " assistance\n", bp->dev->name);
/* Service group-1 deasserted attentions: read/clear the doorbell
 * queue (DORQ) interrupt status and report it; the discard-bit test
 * guarding the FATAL message is hidden in this excerpt. */
1719 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
1723 if (attn & BNX2X_DOORQ_ASSERT) {
1725 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
1726 BNX2X_ERR("DB hw attention 0x%x\n", val);
1727 /* DORQ discard attention */
1729 BNX2X_ERR("FATAL error from DORQ\n");
/* Service group-2 deasserted attentions: CFC and PXP hardware
 * interrupts — read/clear their status registers and log; the bit
 * tests guarding the FATAL messages are hidden in this excerpt. */
1733 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
1737 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
1739 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
1740 BNX2X_ERR("CFC hw attention 0x%x\n", val);
1741 /* CFC error attention */
1743 BNX2X_ERR("FATAL error from CFC\n");
1746 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
1748 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
1749 BNX2X_ERR("PXP hw attention 0x%x\n", val);
1750 /* RQ_USDMDP_FIFO_OVERFLOW */
1752 BNX2X_ERR("FATAL error from PXP\n");
/* Service group-3 deasserted attentions: microcode (storm) asserts,
 * MCP asserts, and latched attentions.  General-attention registers
 * 7..11 are cleared by writing 0; latched bits are cleared through
 * MISC_REG_AEU_CLR_LATCH_SIGNAL. */
1756 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
1758 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
1760 if (attn & BNX2X_MC_ASSERT_BITS) {
1762 BNX2X_ERR("MC assert!\n");
1763 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
1764 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
1765 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
1766 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
1769 } else if (attn & BNX2X_MCP_ASSERT) {
1771 BNX2X_ERR("MCP assert!\n");
1772 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
/* dump the management-CPU assert info for debugging */
1773 bnx2x_mc_assert(bp);
1776 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
1779 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
1781 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
1782 BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
/* Handle deasserted attention bits: read the four after-invert AEU
 * signal registers for this port, dispatch each deasserted dynamic
 * attention group to the per-group handlers, flag fatal HW-block and
 * parity attentions, ack the bits to the IGU "attn bits clr" address,
 * unmask them again in the AEU and clear them from bp->attn_state.
 * NOTE(review): the matching bnx2x_lock_alr() call referenced by the
 * comment below sits in a hidden line before the register reads.
 */
1786 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
1788 struct attn_route attn;
1789 struct attn_route group_mask;
1790 int port = bp->port;
1795 /* need to take HW lock because MCP or other port might also
1796 try to handle this event */
1799 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
1800 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
1801 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
1802 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
1803 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
1805 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
1806 if (deasserted & (1 << index)) {
1807 group_mask = bp->attn_group[index];
1809 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
1810 (unsigned long long)group_mask.sig[0]);
/* group 3 first: MC/MCP asserts are the most urgent to record */
1812 bnx2x_attn_int_deasserted3(bp,
1813 attn.sig[3] & group_mask.sig[3]);
1814 bnx2x_attn_int_deasserted1(bp,
1815 attn.sig[1] & group_mask.sig[1]);
1816 bnx2x_attn_int_deasserted2(bp,
1817 attn.sig[2] & group_mask.sig[2]);
1818 bnx2x_attn_int_deasserted0(bp,
1819 attn.sig[0] & group_mask.sig[0]);
1821 if ((attn.sig[0] & group_mask.sig[0] &
1822 HW_INTERRUT_ASSERT_SET_0) ||
1823 (attn.sig[1] & group_mask.sig[1] &
1824 HW_INTERRUT_ASSERT_SET_1) ||
1825 (attn.sig[2] & group_mask.sig[2] &
1826 HW_INTERRUT_ASSERT_SET_2))
1827 BNX2X_ERR("FATAL HW block attention"
1828 " set0 0x%x set1 0x%x"
1830 (attn.sig[0] & group_mask.sig[0] &
1831 HW_INTERRUT_ASSERT_SET_0),
1832 (attn.sig[1] & group_mask.sig[1] &
1833 HW_INTERRUT_ASSERT_SET_1),
1834 (attn.sig[2] & group_mask.sig[2] &
1835 HW_INTERRUT_ASSERT_SET_2));
1837 if ((attn.sig[0] & group_mask.sig[0] &
1838 HW_PRTY_ASSERT_SET_0) ||
1839 (attn.sig[1] & group_mask.sig[1] &
1840 HW_PRTY_ASSERT_SET_1) ||
1841 (attn.sig[2] & group_mask.sig[2] &
1842 HW_PRTY_ASSERT_SET_2))
1843 BNX2X_ERR("FATAL HW block parity attention\n");
1847 bnx2x_unlock_alr(bp);
1849 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
1852 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
1853 val, BAR_IGU_INTMEM + reg_addr); */
1854 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
/* sanity: deasserted bits must be masked and currently asserted */
1856 if (bp->aeu_mask & (deasserted & 0xff))
1857 BNX2X_ERR("IGU BUG\n");
1858 if (~bp->attn_state & deasserted)
1859 BNX2X_ERR("IGU BUG\n");
1861 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1862 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1864 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
1865 bp->aeu_mask |= (deasserted & 0xff);
1867 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
1868 REG_WR(bp, reg_addr, bp->aeu_mask);
1870 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1871 bp->attn_state &= ~deasserted;
1872 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
/* Top-level attention handler: snapshot attn_bits/attn_bits_ack from
 * the attention status block, diff them against bp->attn_state to
 * find newly asserted vs newly deasserted bits, and dispatch each set
 * to its handler.  A bit that differs from the ack in the same way it
 * differs from our state indicates a lost transition ("bad state").
 */
1875 static void bnx2x_attn_int(struct bnx2x *bp)
1877 /* read local copy of bits */
1878 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
1879 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
1880 u32 attn_state = bp->attn_state;
1882 /* look for changed bits */
1883 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
1884 u32 deasserted = ~attn_bits & attn_ack & attn_state;
1887 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
1888 attn_bits, attn_ack, asserted, deasserted);
1890 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
1891 BNX2X_ERR("bad attention state\n");
1893 /* handle bits that were raised */
1895 bnx2x_attn_int_asserted(bp, asserted);
1898 bnx2x_attn_int_deasserted(bp, deasserted);
/* Slow-path workqueue handler (scheduled from the SP ISR): bail out
 * while interrupts are administratively disabled, refresh the default
 * status-block indices, service whatever changed (the per-storm
 * dispatch, e.g. the attention call, sits in hidden lines), then ack
 * all five default-SB segments back to the IGU.
 */
1901 static void bnx2x_sp_task(struct work_struct *work)
1903 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
1906 /* Return here if interrupt is disabled */
1907 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1908 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
1912 status = bnx2x_update_dsb_idx(bp);
1914 BNX2X_ERR("spurious slowpath interrupt!\n");
1916 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
1922 /* CStorm events: query_stats, port delete ramrod */
1924 bp->stat_pending = 0;
/* ack every default-SB segment with its freshly cached index */
1926 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
1928 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
1930 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
1932 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
1934 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
/* MSI-X slow-path interrupt handler: ignore interrupts that arrive
 * while intr_sem is held, disable further IGU interrupts for the
 * default SB, and defer the real work to the sp_task workqueue.
 * (IRQ_HANDLED/IRQ_NONE return lines are hidden in this excerpt.) */
1939 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
1941 struct net_device *dev = dev_instance;
1942 struct bnx2x *bp = netdev_priv(dev);
1944 /* Return here if interrupt is disabled */
1945 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1946 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
1950 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
1952 #ifdef BNX2X_STOP_ON_ERROR
1953 if (unlikely(bp->panic))
1957 schedule_work(&bp->sp_task);
1962 /* end of slow path */
1966 /****************************************************************************
1968 ****************************************************************************/
/* Fold the delta of 32-bit MAC counter new->s since the last poll
 * into cumulative ethtool counter estats->t.  (The old->s refresh
 * line of this macro is hidden in this excerpt.) */
1970 #define UPDATE_STAT(s, t) \
1972 estats->t += new->s - old->s; \
/* sum[hi:lo] += add[hi:lo]
 *
 * 64-bit accumulate built from two u32 halves: add the low words
 * first, then propagate the carry — detected by unsigned wrap-around
 * of the low word — into the high word together with a_hi.
 *
 * The ternary MUST be parenthesized: '+' binds tighter than '?:',
 * so the unparenthesized form "a_hi + (s_lo < a_lo) ? 1 : 0" parses
 * as "((a_hi + carry) ? 1 : 0)" and the high words are never summed.
 * do { } while (0) makes the macro expand to a single statement.
 */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
/* 64-bit subtraction on split u32 halves with explicit borrow
 * handling; clamping branches for a negative result are hidden in
 * this excerpt. */
1983 /* difference = minuend - subtrahend */
1984 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
1986 if (m_lo < s_lo) { /* underflow */ \
1987 d_hi = m_hi - s_hi; \
1988 if (d_hi > 0) { /* we can 'loan' 1 */ \
1990 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
1991 } else { /* m_hi <= s_hi */ \
1995 } else { /* m_lo >= s_lo */ \
1996 if (m_hi < s_hi) { \
1999 } else { /* m_hi >= s_hi */ \
2000 d_hi = m_hi - s_hi; \
2001 d_lo = m_lo - s_lo; \
/* In-place 64-bit subtraction: delegates to DIFF_64 with the minuend
 * doubling as the destination. */
2006 /* minuend -= subtrahend */
2007 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
2009 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
/* Fold the delta of a 64-bit (hi/lo split) MAC counter since the last
 * poll into the cumulative estats counter: diff against the old
 * snapshot, refresh the snapshot, then accumulate.  Uses a 'diff'
 * temporary declared by the enclosing function. */
2012 #define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
2014 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
2015 diff.lo, new->s_lo, old->s_lo); \
2016 old->s_hi = new->s_hi; \
2017 old->s_lo = new->s_lo; \
2018 ADD_64(estats->t_hi, diff.hi, \
2019 estats->t_lo, diff.lo); \
/* Add a 32-bit value into a split 64-bit accumulator; the visible
 * line propagates the carry (low-word wrap-around) into the high
 * word — the low-word accumulate line is hidden in this excerpt. */
2022 /* sum[hi:lo] += add */
2023 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2026 s_hi += (s_lo < a) ? 1 : 0; \
/* Extend 32-bit hardware counter new->s into the 64-bit estats
 * accumulator t_hi:t_lo. */
2029 #define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
2031 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
/* Same extension for little-endian TSTORM client counters: compute
 * the CPU-order delta since the last snapshot, refresh the snapshot,
 * and fold the delta into the 64-bit estats accumulator. */
2034 #define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
2036 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2037 old_tclient->s = le32_to_cpu(tclient->s); \
2038 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
2042 * General service functions
/* Collapse a {hi, lo} pair of u32 stats words (hiref points at the
 * high word) into a long: the full 64-bit value on 64-bit builds.
 * NOTE(review): the 'hi' load and the 32-bit (#else) branch —
 * presumably returning just 'lo' — are hidden in this excerpt. */
2045 static inline long bnx2x_hilo(u32 *hiref)
2047 u32 lo = *(hiref + 1);
2048 #if (BITS_PER_LONG == 64)
2051 return HILO_U64(hi, lo);
2058 * Init service functions
/* Build the chain of DMAE commands that the stats machinery fires on
 * each poll:
 *  - PCI->GRC copy of the driver's eth_stats into the MCP mailbox
 *    (fw_mb) for the management firmware;
 *  - when the link is up, GRC->PCI copies of the active MAC's
 *    hardware counters (BMAC or EMAC register windows) into
 *    mac_stats;
 *  - a final GRC->PCI copy of the NIG stats whose completion write
 *    (comp_val to nig.done) marks the whole chain finished.
 * Commands are staged in the bp->slowpath dmae array indexed by
 * bp->executer_idx; all but the last complete to the DMAE "go"
 * register so the hardware chains them.
 */
2061 static void bnx2x_init_mac_stats(struct bnx2x *bp)
2063 struct dmae_command *dmae;
2064 int port = bp->port;
2065 int loader_idx = port * 8;
2069 bp->executer_idx = 0;
/* PCI -> GRC: publish eth_stats to the MCP mailbox */
2072 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2073 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2075 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2077 DMAE_CMD_ENDIANITY_DW_SWAP |
2079 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2081 if (bp->link_vars.link_up)
2082 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
2084 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2085 dmae->opcode = opcode;
2086 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
2088 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
2090 dmae->dst_addr_lo = bp->fw_mb >> 2;
2091 dmae->dst_addr_hi = 0;
2092 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
2094 if (bp->link_vars.link_up) {
/* chain to the next command via the DMAE go register */
2095 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2096 dmae->comp_addr_hi = 0;
2099 dmae->comp_addr_lo = 0;
2100 dmae->comp_addr_hi = 0;
2105 if (!bp->link_vars.link_up) {
2106 /* no need to collect statistics in link down */
2110 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2111 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2112 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2114 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2116 DMAE_CMD_ENDIANITY_DW_SWAP |
2118 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
/* pull the hardware counters of whichever MAC is active */
2120 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2122 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
2123 NIG_REG_INGRESS_BMAC0_MEM);
2125 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
2126 BIGMAC_REGISTER_TX_STAT_GTBYT */
2127 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2128 dmae->opcode = opcode;
2129 dmae->src_addr_lo = (mac_addr +
2130 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2131 dmae->src_addr_hi = 0;
2132 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2133 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2134 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
2135 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2136 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2137 dmae->comp_addr_hi = 0;
2140 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
2141 BIGMAC_REGISTER_RX_STAT_GRIPJ */
2142 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2143 dmae->opcode = opcode;
2144 dmae->src_addr_lo = (mac_addr +
2145 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2146 dmae->src_addr_hi = 0;
2147 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2148 offsetof(struct bmac_stats, rx_gr64));
2149 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2150 offsetof(struct bmac_stats, rx_gr64));
2151 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
2152 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2153 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2154 dmae->comp_addr_hi = 0;
2157 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
2159 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
2161 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
2162 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2163 dmae->opcode = opcode;
2164 dmae->src_addr_lo = (mac_addr +
2165 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
2166 dmae->src_addr_hi = 0;
2167 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2168 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2169 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
2170 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2171 dmae->comp_addr_hi = 0;
2174 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
2175 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2176 dmae->opcode = opcode;
2177 dmae->src_addr_lo = (mac_addr +
2178 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
2179 dmae->src_addr_hi = 0;
2180 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2181 offsetof(struct emac_stats,
2182 rx_falsecarriererrors));
2183 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2184 offsetof(struct emac_stats,
2185 rx_falsecarriererrors));
2187 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2188 dmae->comp_addr_hi = 0;
2191 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
2192 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2193 dmae->opcode = opcode;
2194 dmae->src_addr_lo = (mac_addr +
2195 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
2196 dmae->src_addr_hi = 0;
2197 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2198 offsetof(struct emac_stats,
2200 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2201 offsetof(struct emac_stats,
2203 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
2204 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2205 dmae->comp_addr_hi = 0;
/* final command: NIG stats; its completion write to nig.done
 * (comp_val 0xffffffff) signals the whole chain is finished */
2210 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2211 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2212 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
2213 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2215 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2217 DMAE_CMD_ENDIANITY_DW_SWAP |
2219 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2220 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
2221 NIG_REG_STAT0_BRB_DISCARD) >> 2;
2222 dmae->src_addr_hi = 0;
2223 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
2224 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
2225 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
2226 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
2227 offsetof(struct nig_stats, done));
2228 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
2229 offsetof(struct nig_stats, done));
2230 dmae->comp_val = 0xffffffff;
/* One-time statistics initialization for this port: disable the
 * stats state machine, snapshot the current BRB discard counter,
 * zero the "old" snapshots and the netdev counters, then tell the
 * X/T storms to collect stats (flag = 1), CSTORM not to (flag = 0),
 * and program each storm with the DMA address of fw_stats.
 */
2233 static void bnx2x_init_stats(struct bnx2x *bp)
2235 int port = bp->port;
2237 bp->stats_state = STATS_STATE_DISABLE;
2238 bp->executer_idx = 0;
/* baseline for the BRB discard delta computed at update time */
2240 bp->old_brb_discard = REG_RD(bp,
2241 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
2243 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2244 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
2245 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
2247 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
2248 REG_WR(bp, BAR_XSTRORM_INTMEM +
2249 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2251 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
2252 REG_WR(bp, BAR_TSTRORM_INTMEM +
2253 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2255 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
2256 REG_WR(bp, BAR_CSTRORM_INTMEM +
2257 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
/* where the storms should DMA their stats queries */
2259 REG_WR(bp, BAR_XSTRORM_INTMEM +
2260 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
2261 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2262 REG_WR(bp, BAR_XSTRORM_INTMEM +
2263 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
2264 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2266 REG_WR(bp, BAR_TSTRORM_INTMEM +
2267 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
2268 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2269 REG_WR(bp, BAR_TSTRORM_INTMEM +
2270 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
2271 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
/* Request the statistics state machine to stop and wait until it
 * reaches DISABLE, logging a timeout if it never does.  The loop
 * bound and sleep between polls are hidden in this excerpt. */
2274 static void bnx2x_stop_stats(struct bnx2x *bp)
2277 if (bp->stats_state != STATS_STATE_DISABLE) {
2280 bp->stats_state = STATS_STATE_STOP;
2281 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
/* the timer/work path is expected to move STOP -> DISABLE */
2283 while (bp->stats_state != STATS_STATE_DISABLE) {
2285 BNX2X_ERR("timeout waiting for stats stop\n");
2292 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
2296 * Statistics service functions
/* Fold the freshly DMAed BigMAC hardware counters into the ethtool
 * stats.  Multicast+broadcast TX packet counts are summed so they can
 * be subtracted from the total (tx_gtpkt) to recover the unicast
 * count.  All >1522-byte TX size buckets (gt2047/gt4095/gt9216/
 * gt16383) are deliberately folded into the single 1523..9022 bucket.
 * NOTE(review): the declarations/initialization of the 'diff' and
 * 'sum' temporaries are hidden in this excerpt.
 */
2299 static void bnx2x_update_bmac_stats(struct bnx2x *bp)
2303 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
2304 struct bmac_stats *old = &bp->old_bmac;
2305 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2310 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
2311 tx_gtbyt.lo, total_bytes_transmitted_lo);
2313 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
2314 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
2315 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
2317 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
2318 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
2319 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
/* unicast = total packets - (multicast + broadcast) */
2321 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
2322 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
2323 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
2324 estats->total_unicast_packets_transmitted_lo, sum.lo);
2326 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
2327 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
2328 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
2329 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
2330 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
2331 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
2332 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
2333 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
2334 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
2335 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
2336 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
2338 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
2339 UPDATE_STAT(rx_grund.lo, runt_packets_received);
2340 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
2341 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
2342 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
2343 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
2344 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
2345 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
2347 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
2348 rx_grerb.lo, stat_IfHCInBadOctets_lo);
2349 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
2350 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
2351 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
2352 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
/* BMAC has no dedicated XoffStateEntered counter; approximate it
 * with the received-XOFF count */
2353 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
/* Fold the freshly DMAed EMAC (1G MAC) hardware counters into the
 * ethtool stats.  32-bit byte/packet counters are extended into the
 * 64-bit hi/lo accumulators; the rest are added directly.  No 'old'
 * snapshot is kept here — EMAC counters read via the STAT_AC windows
 * are treated as per-poll deltas (TODO confirm: clear-on-read).
 */
2356 static void bnx2x_update_emac_stats(struct bnx2x *bp)
2358 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
2359 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2361 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
2362 total_bytes_transmitted_lo);
2363 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
2364 total_unicast_packets_transmitted_hi,
2365 total_unicast_packets_transmitted_lo);
2366 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
2367 total_multicast_packets_transmitted_hi,
2368 total_multicast_packets_transmitted_lo);
2369 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
2370 total_broadcast_packets_transmitted_hi,
2371 total_broadcast_packets_transmitted_lo);
2373 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
2374 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
2375 estats->single_collision_transmit_frames +=
2376 new->tx_dot3statssinglecollisionframes;
2377 estats->multiple_collision_transmit_frames +=
2378 new->tx_dot3statsmultiplecollisionframes;
2379 estats->late_collision_frames += new->tx_dot3statslatecollisions;
2380 estats->excessive_collision_frames +=
2381 new->tx_dot3statsexcessivecollisions;
2382 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
2383 estats->frames_transmitted_65_127_bytes +=
2384 new->tx_etherstatspkts65octetsto127octets;
2385 estats->frames_transmitted_128_255_bytes +=
2386 new->tx_etherstatspkts128octetsto255octets;
2387 estats->frames_transmitted_256_511_bytes +=
2388 new->tx_etherstatspkts256octetsto511octets;
2389 estats->frames_transmitted_512_1023_bytes +=
2390 new->tx_etherstatspkts512octetsto1023octets;
2391 estats->frames_transmitted_1024_1522_bytes +=
2392 new->tx_etherstatspkts1024octetsto1522octet;
2393 estats->frames_transmitted_1523_9022_bytes +=
2394 new->tx_etherstatspktsover1522octets;
2396 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
2397 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
2398 estats->false_carrier_detections += new->rx_falsecarriererrors;
2399 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
2400 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
2401 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
2402 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
2403 estats->control_frames_received += new->rx_maccontrolframesreceived;
2404 estats->error_runt_packets_received += new->rx_etherstatsfragments;
2405 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
2407 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
2408 stat_IfHCInBadOctets_lo);
2409 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
2410 stat_IfHCOutBadOctets_lo);
2411 estats->stat_Dot3statsInternalMacTransmitErrors +=
2412 new->tx_dot3statsinternalmactransmiterrors;
2413 estats->stat_Dot3StatsCarrierSenseErrors +=
2414 new->rx_dot3statscarriersenseerrors;
2415 estats->stat_Dot3StatsDeferredTransmissions +=
2416 new->tx_dot3statsdeferredtransmissions;
2417 estats->stat_FlowControlDone += new->tx_flowcontroldone;
2418 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
/* bnx2x_update_storm_stats - fold FW (storm) statistics and NIG DMAE
 * statistics into the driver's eth_stats block.
 *
 * Validity is checked via "done" markers that the HW/FW set to 0xffffffff
 * when a statistics snapshot is complete; if any marker is not set the
 * function logs and bails out.  NOTE(review): this excerpt elides the
 * early-return statements and closing braces of those checks, as well as
 * the final return — confirm against the full source.
 */
2421 static int bnx2x_update_storm_stats(struct bnx2x *bp)
2423 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
2424 struct tstorm_common_stats *tstats = &stats->tstorm_common;
2425 struct tstorm_per_client_stats *tclient =
2426 &tstats->client_statistics[0];
2427 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
2428 struct xstorm_common_stats *xstats = &stats->xstorm_common;
2429 struct nig_stats *nstats = bnx2x_sp(bp, nig);
2430 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2433 /* are DMAE stats valid? */
2434 if (nstats->done != 0xffffffff) {
2435 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
2439 /* are storm stats valid? */
2440 if (tstats->done.hi != 0xffffffff) {
2441 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
2444 if (xstats->done.hi != 0xffffffff) {
2445 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
/* Total received bytes = valid bytes + error bytes (64-bit add via
 * the hi/lo pair and the ADD_64 macro). */
2449 estats->total_bytes_received_hi =
2450 estats->valid_bytes_received_hi =
2451 le32_to_cpu(tclient->total_rcv_bytes.hi);
2452 estats->total_bytes_received_lo =
2453 estats->valid_bytes_received_lo =
2454 le32_to_cpu(tclient->total_rcv_bytes.lo);
2455 ADD_64(estats->total_bytes_received_hi,
2456 le32_to_cpu(tclient->rcv_error_bytes.hi),
2457 estats->total_bytes_received_lo,
2458 le32_to_cpu(tclient->rcv_error_bytes.lo));
/* Extend the 32-bit per-client counters into 64-bit driver counters;
 * UPDATE_EXTEND_TSTAT presumably diffs against old_tclient — confirm
 * against the macro definition. */
2460 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
2461 total_unicast_packets_received_hi,
2462 total_unicast_packets_received_lo);
2463 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
2464 total_multicast_packets_received_hi,
2465 total_multicast_packets_received_lo);
2466 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
2467 total_broadcast_packets_received_hi,
2468 total_broadcast_packets_received_lo);
/* Per-size RX histograms are not available from the storms. */
2470 estats->frames_received_64_bytes = MAC_STX_NA;
2471 estats->frames_received_65_127_bytes = MAC_STX_NA;
2472 estats->frames_received_128_255_bytes = MAC_STX_NA;
2473 estats->frames_received_256_511_bytes = MAC_STX_NA;
2474 estats->frames_received_512_1023_bytes = MAC_STX_NA;
2475 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
2476 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
/* XSTORM TX-side counters. */
2478 estats->x_total_sent_bytes_hi =
2479 le32_to_cpu(xstats->total_sent_bytes.hi);
2480 estats->x_total_sent_bytes_lo =
2481 le32_to_cpu(xstats->total_sent_bytes.lo);
2482 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
/* TSTORM RX-side byte counters, per traffic class. */
2484 estats->t_rcv_unicast_bytes_hi =
2485 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
2486 estats->t_rcv_unicast_bytes_lo =
2487 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
2488 estats->t_rcv_broadcast_bytes_hi =
2489 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
2490 estats->t_rcv_broadcast_bytes_lo =
2491 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
2492 estats->t_rcv_multicast_bytes_hi =
2493 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
2494 estats->t_rcv_multicast_bytes_lo =
2495 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
2496 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
/* RX discard/drop accounting. */
2498 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
2499 estats->packets_too_big_discard =
2500 le32_to_cpu(tclient->packets_too_big_discard);
2501 estats->jabber_packets_received = estats->packets_too_big_discard +
2502 estats->stat_Dot3statsFramesTooLong;
2503 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
2504 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
2505 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
2506 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
2507 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
2508 estats->brb_truncate_discard =
2509 le32_to_cpu(tstats->brb_truncate_discard);
/* brb_discard is a free-running HW counter: accumulate the delta since
 * the previous read, then remember the new raw value. */
2511 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
2512 bp->old_brb_discard = nstats->brb_discard;
/* NIG counters are copied verbatim (not accumulated). */
2514 estats->brb_packet = nstats->brb_packet;
2515 estats->brb_truncate = nstats->brb_truncate;
2516 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
2517 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
2518 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
2519 estats->mng_discard = nstats->mng_discard;
2520 estats->mng_octet_inp = nstats->mng_octet_inp;
2521 estats->mng_octet_out = nstats->mng_octet_out;
2522 estats->mng_packet_inp = nstats->mng_packet_inp;
2523 estats->mng_packet_out = nstats->mng_packet_out;
2524 estats->pbf_octets = nstats->pbf_octets;
2525 estats->pbf_packet = nstats->pbf_packet;
2526 estats->safc_inp = nstats->safc_inp;
/* Re-arm the storm "done" markers for the next statistics query. */
2528 xstats->done.hi = 0;
2529 tstats->done.hi = 0;
/* bnx2x_update_net_stats - project the driver's private eth_stats into
 * the generic struct net_device_stats exposed to the network core.
 *
 * bnx2x_hilo() collapses a hi/lo 32-bit counter pair into a single
 * value.  All inputs come from bp's eth_stats, previously filled by the
 * storm/MAC statistics update paths.
 */
2535 static void bnx2x_update_net_stats(struct bnx2x *bp)
2537 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2538 struct net_device_stats *nstats = &bp->dev->stats;
2540 nstats->rx_packets =
2541 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
2542 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
2543 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
2545 nstats->tx_packets =
2546 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
2547 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
2548 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
2550 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
2552 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
2554 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
2555 nstats->tx_dropped = 0;
/* NOTE(review): the assignment target line (presumably nstats->multicast)
 * is elided in this excerpt — confirm against the full source. */
2558 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
2560 nstats->collisions = estats->single_collision_transmit_frames +
2561 estats->multiple_collision_transmit_frames +
2562 estats->late_collision_frames +
2563 estats->excessive_collision_frames;
2565 nstats->rx_length_errors = estats->runt_packets_received +
2566 estats->jabber_packets_received;
2567 nstats->rx_over_errors = estats->brb_discard +
2568 estats->brb_truncate_discard;
2569 nstats->rx_crc_errors = estats->crc_receive_errors;
2570 nstats->rx_frame_errors = estats->alignment_errors;
2571 nstats->rx_fifo_errors = estats->no_buff_discard;
2572 nstats->rx_missed_errors = estats->xxoverflow_discard;
/* rx_errors is the sum of all RX error categories above. */
2574 nstats->rx_errors = nstats->rx_length_errors +
2575 nstats->rx_over_errors +
2576 nstats->rx_crc_errors +
2577 nstats->rx_frame_errors +
2578 nstats->rx_fifo_errors +
2579 nstats->rx_missed_errors;
2581 nstats->tx_aborted_errors = estats->late_collision_frames +
2582 estats->excessive_collision_frames;
2583 nstats->tx_carrier_errors = estats->false_carrier_detections;
2584 nstats->tx_fifo_errors = 0;
2585 nstats->tx_heartbeat_errors = 0;
2586 nstats->tx_window_errors = 0;
2588 nstats->tx_errors = nstats->tx_aborted_errors +
2589 nstats->tx_carrier_errors;
/* Advance the MAC statistics snapshot markers for the next cycle. */
2591 estats->mac_stx_start = ++estats->mac_stx_end;
/* bnx2x_update_stats - periodic statistics refresh, driven from the
 * driver timer.
 *
 * Sequence: consume the storm/FW statistics, update the active MAC's
 * (BMAC or EMAC) HW statistics, project everything into net_device
 * stats, optionally dump debug counters, then re-arm HW statistics
 * collection (DMAE loader + STAT_QUERY ramrod) for the next cycle.
 * NOTE(review): this excerpt elides several lines (braces, returns and
 * some statements) — confirm control flow against the full source.
 */
2594 static void bnx2x_update_stats(struct bnx2x *bp)
2598 if (!bnx2x_update_storm_stats(bp)) {
/* Only one MAC is active at a time; pick its statistics path. */
2600 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2601 bnx2x_update_bmac_stats(bp);
2603 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
2604 bnx2x_update_emac_stats(bp);
2606 } else { /* unreached */
2607 BNX2X_ERR("no MAC active\n");
2611 bnx2x_update_net_stats(bp);
/* Verbose per-timer debug dump, gated on NETIF_MSG_TIMER. */
2614 if (bp->msglevel & NETIF_MSG_TIMER) {
2615 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2616 struct net_device_stats *nstats = &bp->dev->stats;
2618 printk(KERN_DEBUG "%s:\n", bp->dev->name);
2619 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
2621 bnx2x_tx_avail(bp->fp),
2622 *bp->fp->tx_cons_sb, nstats->tx_packets);
2623 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
2625 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
2626 *bp->fp->rx_cons_sb, nstats->rx_packets);
2627 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
2628 netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
2629 estats->driver_xoff, estats->brb_discard);
2630 printk(KERN_DEBUG "tstats: checksum_discard %u "
2631 "packets_too_big_discard %u no_buff_discard %u "
2632 "mac_discard %u mac_filter_discard %u "
2633 "xxovrflow_discard %u brb_truncate_discard %u "
2634 "ttl0_discard %u\n",
2635 estats->checksum_discard,
2636 estats->packets_too_big_discard,
2637 estats->no_buff_discard, estats->mac_discard,
2638 estats->mac_filter_discard, estats->xxoverflow_discard,
2639 estats->brb_truncate_discard, estats->ttl0_discard);
2641 for_each_queue(bp, i) {
2642 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
2643 bnx2x_fp(bp, i, tx_pkt),
2644 bnx2x_fp(bp, i, rx_pkt),
2645 bnx2x_fp(bp, i, rx_calls));
/* Do not re-arm statistics collection unless the NIC is fully up. */
2649 if (bp->state != BNX2X_STATE_OPEN) {
2650 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
2654 #ifdef BNX2X_STOP_ON_ERROR
2655 if (unlikely(bp->panic))
/* If DMAE commands were queued, append a "loader" command that copies
 * the command block itself into DMAE command memory and kick it off. */
2660 if (bp->executer_idx) {
2661 struct dmae_command *dmae = &bp->dmae;
2662 int port = bp->port;
2663 int loader_idx = port * 8;
2665 memset(dmae, 0, sizeof(struct dmae_command));
2667 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2668 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2669 DMAE_CMD_DST_RESET |
2671 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2673 DMAE_CMD_ENDIANITY_DW_SWAP |
2675 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2676 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
2677 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
2678 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
2679 sizeof(struct dmae_command) *
2680 (loader_idx + 1)) >> 2;
2681 dmae->dst_addr_hi = 0;
2682 dmae->len = sizeof(struct dmae_command) >> 2;
2683 dmae->len--; /* !!! for A0/1 only */
2684 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
2685 dmae->comp_addr_hi = 0;
2688 bnx2x_post_dmae(bp, dmae, loader_idx);
2691 if (bp->stats_state != STATS_STATE_ENABLE) {
2692 bp->stats_state = STATS_STATE_DISABLE;
/* Ask the FW for a fresh statistics snapshot via a ramrod. */
2696 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
2697 /* stats ramrod has it's own slot on the spe */
2699 bp->stat_pending = 1;
/* bnx2x_timer - periodic driver timer callback.
 *
 * Skips all work when the netdev is down or interrupts are gated by
 * intr_sem.  Performs TX/RX polling on queue 0, maintains the driver
 * pulse heartbeat with the MCP firmware (logging if the sequence
 * numbers diverge), runs the statistics update when enabled, and
 * re-arms itself.  NOTE(review): some lines (braces, conditions) are
 * elided in this excerpt.
 *
 * @data: the struct bnx2x pointer, cast through unsigned long
 *        (pre-timer_setup() kernel timer API).
 */
2703 static void bnx2x_timer(unsigned long data)
2705 struct bnx2x *bp = (struct bnx2x *) data;
2707 if (!netif_running(bp->dev))
2710 if (atomic_read(&bp->intr_sem) != 0)
2714 struct bnx2x_fastpath *fp = &bp->fp[0];
2717 bnx2x_tx_int(fp, 1000);
2718 rc = bnx2x_rx_int(fp, 1000);
2722 int port = bp->port;
/* Driver pulse: bump the sequence and publish it to shared memory so
 * the MCP can see the driver is alive. */
2726 ++bp->fw_drv_pulse_wr_seq;
2727 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2728 /* TBD - add SYSTEM_TIME */
2729 drv_pulse = bp->fw_drv_pulse_wr_seq;
2730 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
2732 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
2733 MCP_PULSE_SEQ_MASK);
2734 /* The delta between driver pulse and mcp response
2735 * should be 1 (before mcp response) or 0 (after mcp response)
2737 if ((drv_pulse != mcp_pulse) &&
2738 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2739 /* someone lost a heartbeat... */
2740 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2741 drv_pulse, mcp_pulse);
2745 if (bp->stats_state == STATS_STATE_DISABLE)
2748 bnx2x_update_stats(bp);
/* Re-arm for the next tick. */
2751 mod_timer(&bp->timer, jiffies + bp->current_interval);
2754 /* end of Statistics */
2759 * nic init service functions
/* bnx2x_init_sb - initialize one fastpath host status block.
 *
 * Publishes the DMA addresses of the USTORM and CSTORM sections of the
 * status block into the respective storm internal memories, disables
 * every host-coalescing index, and finally ACKs/enables the block in
 * the IGU.
 *
 * @sb:      host-side status block (virtual address)
 * @mapping: its DMA (bus) address
 * @id:      status block id for this queue
 */
2762 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2763 dma_addr_t mapping, int id)
2765 int port = bp->port;
/* USTORM section: publish address and id. */
2770 section = ((u64)mapping) + offsetof(struct host_status_block,
2772 sb->u_status_block.status_block_id = id;
2774 REG_WR(bp, BAR_USTRORM_INTMEM +
2775 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
2776 REG_WR(bp, BAR_USTRORM_INTMEM +
2777 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
/* Start with all USTORM HC indices disabled (0x1 = disable). */
2780 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2781 REG_WR16(bp, BAR_USTRORM_INTMEM +
2782 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
/* CSTORM section: same treatment. */
2785 section = ((u64)mapping) + offsetof(struct host_status_block,
2787 sb->c_status_block.status_block_id = id;
2789 REG_WR(bp, BAR_CSTRORM_INTMEM +
2790 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
2791 REG_WR(bp, BAR_CSTRORM_INTMEM +
2792 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
2795 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2796 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2797 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
/* Enable interrupts for this status block in the IGU. */
2799 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/* bnx2x_init_def_sb - initialize the default (slowpath) status block.
 *
 * Sets up the attention section (AEU attention groups and HC attention
 * message address) and then the per-storm default status block sections
 * (USTORM, CSTORM, TSTORM, XSTORM): each gets its DMA address, BTR
 * value and all HC indices disabled.  Ends by enabling the block in
 * the IGU.  NOTE(review): some assignment/brace lines are elided in
 * this excerpt.
 */
2802 static void bnx2x_init_def_sb(struct bnx2x *bp,
2803 struct host_def_status_block *def_sb,
2804 dma_addr_t mapping, int id)
2806 int port = bp->port;
2807 int index, val, reg_offset;
/* --- attention section --- */
2811 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2812 atten_status_block);
2813 def_sb->atten_status_block.status_block_id = id;
2815 bp->def_att_idx = 0;
2818 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2819 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
/* Cache the four AEU signal enables for each of the attention groups;
 * groups are laid out 0x10 bytes apart, signals 4 bytes apart. */
2821 for (index = 0; index < 3; index++) {
2822 bp->attn_group[index].sig[0] = REG_RD(bp,
2823 reg_offset + 0x10*index);
2824 bp->attn_group[index].sig[1] = REG_RD(bp,
2825 reg_offset + 0x4 + 0x10*index);
2826 bp->attn_group[index].sig[2] = REG_RD(bp,
2827 reg_offset + 0x8 + 0x10*index);
2828 bp->attn_group[index].sig[3] = REG_RD(bp,
2829 reg_offset + 0xc + 0x10*index);
2832 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2833 MISC_REG_AEU_MASK_ATTN_FUNC_0));
2835 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2836 HC_REG_ATTN_MSG0_ADDR_L);
2838 REG_WR(bp, reg_offset, U64_LO(section));
2839 REG_WR(bp, reg_offset + 4, U64_HI(section));
2841 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2843 val = REG_RD(bp, reg_offset);
2845 REG_WR(bp, reg_offset, val);
/* --- USTORM default section --- */
2848 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2849 u_def_status_block);
2850 def_sb->u_def_status_block.status_block_id = id;
2854 REG_WR(bp, BAR_USTRORM_INTMEM +
2855 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2856 REG_WR(bp, BAR_USTRORM_INTMEM +
2857 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2859 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
2862 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2863 REG_WR16(bp, BAR_USTRORM_INTMEM +
2864 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
/* --- CSTORM default section --- */
2867 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2868 c_def_status_block);
2869 def_sb->c_def_status_block.status_block_id = id;
2873 REG_WR(bp, BAR_CSTRORM_INTMEM +
2874 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2875 REG_WR(bp, BAR_CSTRORM_INTMEM +
2876 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2878 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
2881 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2882 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2883 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
/* --- TSTORM default section --- */
2886 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2887 t_def_status_block);
2888 def_sb->t_def_status_block.status_block_id = id;
2892 REG_WR(bp, BAR_TSTRORM_INTMEM +
2893 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2894 REG_WR(bp, BAR_TSTRORM_INTMEM +
2895 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2897 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
2900 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2901 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2902 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
/* --- XSTORM default section --- */
2905 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2906 x_def_status_block);
2907 def_sb->x_def_status_block.status_block_id = id;
2911 REG_WR(bp, BAR_XSTRORM_INTMEM +
2912 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2913 REG_WR(bp, BAR_XSTRORM_INTMEM +
2914 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2916 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
2919 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2920 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2921 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2923 bp->stat_pending = 0;
/* Enable the default status block in the IGU. */
2925 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
/* bnx2x_update_coalesce - program interrupt coalescing for every queue.
 *
 * For each fastpath queue, writes the RX (USTORM) and TX (CSTORM)
 * coalescing timeouts (ticks scaled by /12) and enables/disables the
 * corresponding HC index: a zero tick value disables coalescing for
 * that direction (disable flag = 1).
 */
2928 static void bnx2x_update_coalesce(struct bnx2x *bp)
2930 int port = bp->port;
2933 for_each_queue(bp, i) {
2935 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2936 REG_WR8(bp, BAR_USTRORM_INTMEM +
2937 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
2938 HC_INDEX_U_ETH_RX_CQ_CONS),
2939 bp->rx_ticks_int/12);
2940 REG_WR16(bp, BAR_USTRORM_INTMEM +
2941 USTORM_SB_HC_DISABLE_OFFSET(port, i,
2942 HC_INDEX_U_ETH_RX_CQ_CONS),
2943 bp->rx_ticks_int ? 0 : 1);
2945 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2946 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2947 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
2948 HC_INDEX_C_ETH_TX_CQ_CONS),
2949 bp->tx_ticks_int/12);
2950 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2951 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
2952 HC_INDEX_C_ETH_TX_CQ_CONS),
2953 bp->tx_ticks_int ? 0 : 1);
/* bnx2x_init_rx_rings - set up the RX descriptor and completion rings
 * for every fastpath queue.
 *
 * Computes the RX buffer size from the MTU (plus rx_offset and Ethernet
 * overhead, with 64 bytes of slack), links the pages of the BD and RCQ
 * rings via their last ("next page") entries, pre-allocates rx_ring_size
 * skbs, and publishes the initial RCQ producer to the TSTORM.
 * NOTE(review): some lines (assignment targets of the next-page fields,
 * braces) are elided in this excerpt.
 */
2957 static void bnx2x_init_rx_rings(struct bnx2x *bp)
2961 int port = bp->port;
2963 bp->rx_buf_use_size = bp->dev->mtu;
2965 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
2966 bp->rx_buf_size = bp->rx_buf_use_size + 64;
2968 for_each_queue(bp, j) {
2969 struct bnx2x_fastpath *fp = &bp->fp[j];
2972 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
/* Chain the BD ring pages: the last BD of each page points at the
 * next page (wrapping back to page 0 after the last page). */
2974 for (i = 1; i <= NUM_RX_RINGS; i++) {
2975 struct eth_rx_bd *rx_bd;
2977 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
2979 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
2980 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
2982 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
2983 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
/* Chain the completion-queue pages the same way. */
2987 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
2988 struct eth_rx_cqe_next_page *nextpg;
2990 nextpg = (struct eth_rx_cqe_next_page *)
2991 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
2993 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
2994 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
2996 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
2997 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3000 /* rx completion queue */
3001 fp->rx_comp_cons = ring_prod = 0;
/* Fill the ring with skbs; stop early (with an error log) if
 * allocation fails. */
3003 for (i = 0; i < bp->rx_ring_size; i++) {
3004 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
3005 BNX2X_ERR("was only able to allocate "
3009 ring_prod = NEXT_RX_IDX(ring_prod);
3010 BUG_TRAP(ring_prod > i);
3013 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
3014 fp->rx_pkt = fp->rx_calls = 0;
3016 /* Warning! this will generate an interrupt (to the TSTORM) */
3017 /* must only be done when chip is initialized */
3018 REG_WR(bp, BAR_TSTRORM_INTMEM +
3019 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
/* USTORM memory workaround: publish the (last queue's) RCQ address. */
3023 REG_WR(bp, BAR_USTRORM_INTMEM +
3024 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
3025 U64_LO(fp->rx_comp_mapping));
3026 REG_WR(bp, BAR_USTRORM_INTMEM +
3027 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
3028 U64_HI(fp->rx_comp_mapping));
/* bnx2x_init_tx_ring - set up the TX descriptor rings for every queue.
 *
 * Links each TX BD ring page to the next via its final BD (wrapping to
 * page 0), resets the producer/consumer indices, and points the queue's
 * TX consumer at its status-block index.  NOTE(review): the assignment
 * targets of the next-page address fields are elided in this excerpt.
 */
3032 static void bnx2x_init_tx_ring(struct bnx2x *bp)
3036 for_each_queue(bp, j) {
3037 struct bnx2x_fastpath *fp = &bp->fp[j];
3039 for (i = 1; i <= NUM_TX_RINGS; i++) {
3040 struct eth_tx_bd *tx_bd =
3041 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
3044 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
3045 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
3047 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
3048 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
3051 fp->tx_pkt_prod = 0;
3052 fp->tx_pkt_cons = 0;
3055 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
/* bnx2x_init_sp_ring - initialize the slowpath (SPQ) ring.
 *
 * Resets the SPQ bookkeeping (lock, credit, producer, BD pointers) and
 * publishes the ring's DMA base address and initial producer to the
 * XSTORM.
 */
3060 static void bnx2x_init_sp_ring(struct bnx2x *bp)
3062 int port = bp->port;
3064 spin_lock_init(&bp->spq_lock);
3066 bp->spq_left = MAX_SPQ_PENDING;
3067 bp->spq_prod_idx = 0;
3068 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
3069 bp->spq_prod_bd = bp->spq;
3070 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
3072 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
3073 U64_LO(bp->spq_mapping));
3074 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
3075 U64_HI(bp->spq_mapping));
3077 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
/* bnx2x_init_context - fill the per-queue ethernet connection contexts.
 *
 * For each fastpath queue, programs the XSTORM (TX BD ring + doorbell
 * data addresses), USTORM (RX BD ring, RCQ, status block, multicast
 * alignment) and CSTORM (TX completion index) portions of the context,
 * plus the CDU reservation values for the connection.
 */
3081 static void bnx2x_init_context(struct bnx2x *bp)
3085 for_each_queue(bp, i) {
3086 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
3087 struct bnx2x_fastpath *fp = &bp->fp[i];
/* TX side (XSTORM): BD ring base and doorbell data address. */
3089 context->xstorm_st_context.tx_bd_page_base_hi =
3090 U64_HI(fp->tx_desc_mapping);
3091 context->xstorm_st_context.tx_bd_page_base_lo =
3092 U64_LO(fp->tx_desc_mapping);
3093 context->xstorm_st_context.db_data_addr_hi =
3094 U64_HI(fp->tx_prods_mapping);
3095 context->xstorm_st_context.db_data_addr_lo =
3096 U64_LO(fp->tx_prods_mapping);
/* RX side (USTORM): BD ring, status block binding and RCQ base. */
3098 context->ustorm_st_context.rx_bd_page_base_hi =
3099 U64_HI(fp->rx_desc_mapping);
3100 context->ustorm_st_context.rx_bd_page_base_lo =
3101 U64_LO(fp->rx_desc_mapping);
3102 context->ustorm_st_context.status_block_id = i;
3103 context->ustorm_st_context.sb_index_number =
3104 HC_INDEX_U_ETH_RX_CQ_CONS;
3105 context->ustorm_st_context.rcq_base_address_hi =
3106 U64_HI(fp->rx_comp_mapping);
3107 context->ustorm_st_context.rcq_base_address_lo =
3108 U64_LO(fp->rx_comp_mapping);
3109 context->ustorm_st_context.flags =
3110 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
3111 context->ustorm_st_context.mc_alignment_size = 64;
3112 context->ustorm_st_context.num_rss = bp->num_queues;
3114 context->cstorm_st_context.sb_index_number =
3115 HC_INDEX_C_ETH_TX_CQ_CONS;
3116 context->cstorm_st_context.status_block_id = i;
/* CDU reservation values for this connection id. */
3118 context->xstorm_ag_context.cdu_reserved =
3119 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3120 CDU_REGION_NUMBER_XCM_AG,
3121 ETH_CONNECTION_TYPE);
3122 context->ustorm_ag_context.cdu_usage =
3123 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3124 CDU_REGION_NUMBER_UCM_AG,
3125 ETH_CONNECTION_TYPE);
/* bnx2x_init_ind_table - program the RSS indirection table.
 *
 * Spreads the table entries round-robin over the active queues
 * (entry i -> queue i % num_queues), then writes PRS_REG_A_PRSU_20.
 */
3129 static void bnx2x_init_ind_table(struct bnx2x *bp)
3131 int port = bp->port;
3137 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3138 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
3139 i % bp->num_queues);
3141 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
/* bnx2x_set_client_config - write the per-client TSTORM configuration.
 *
 * Builds a tstorm_eth_client_config (MTU, statistics enable, optional
 * VLAN removal when a vlan group is registered, and MAC-error drop for
 * non-promiscuous modes) and writes it, as two 32-bit words, for every
 * queue.
 */
3144 static void bnx2x_set_client_config(struct bnx2x *bp)
3147 int mode = bp->rx_mode;
3149 int i, port = bp->port;
3150 struct tstorm_eth_client_config tstorm_client = {0};
3152 tstorm_client.mtu = bp->dev->mtu;
3153 tstorm_client.statistics_counter_id = 0;
3154 tstorm_client.config_flags =
3155 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
/* Offload VLAN tag stripping only when a vlan group is present. */
3157 if (mode && bp->vlgrp) {
3158 tstorm_client.config_flags |=
3159 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
3160 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
/* In promiscuous mode even MAC-error frames are delivered. */
3163 if (mode != BNX2X_RX_MODE_PROMISC)
3164 tstorm_client.drop_flags =
3165 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
3167 for_each_queue(bp, i) {
3168 REG_WR(bp, BAR_TSTRORM_INTMEM +
3169 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
3170 ((u32 *)&tstorm_client)[0]);
3171 REG_WR(bp, BAR_TSTRORM_INTMEM +
3172 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
3173 ((u32 *)&tstorm_client)[1]);
3176 /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
3177 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
/* bnx2x_set_storm_rx_mode - program the TSTORM MAC filter for the
 * current rx_mode (none / normal / allmulti / promisc).
 *
 * Translates bp->rx_mode into a tstorm_eth_mac_filter_config bitmap,
 * writes it word-by-word to storm internal memory, and (except for
 * RX_MODE_NONE) refreshes the per-client config.  NOTE(review): the
 * switch statement's opening line and break statements are elided in
 * this excerpt.
 */
3180 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3182 int mode = bp->rx_mode;
3183 int port = bp->port;
3184 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3187 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
3190 case BNX2X_RX_MODE_NONE: /* no Rx */
3191 tstorm_mac_filter.ucast_drop_all = 1;
3192 tstorm_mac_filter.mcast_drop_all = 1;
3193 tstorm_mac_filter.bcast_drop_all = 1;
3195 case BNX2X_RX_MODE_NORMAL:
3196 tstorm_mac_filter.bcast_accept_all = 1;
3198 case BNX2X_RX_MODE_ALLMULTI:
3199 tstorm_mac_filter.mcast_accept_all = 1;
3200 tstorm_mac_filter.bcast_accept_all = 1;
3202 case BNX2X_RX_MODE_PROMISC:
3203 tstorm_mac_filter.ucast_accept_all = 1;
3204 tstorm_mac_filter.mcast_accept_all = 1;
3205 tstorm_mac_filter.bcast_accept_all = 1;
3208 BNX2X_ERR("bad rx mode (%d)\n", mode);
/* Write the filter config one 32-bit word at a time. */
3211 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3212 REG_WR(bp, BAR_TSTRORM_INTMEM +
3213 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
3214 ((u32 *)&tstorm_mac_filter)[i]);
3216 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3217 ((u32 *)&tstorm_mac_filter)[i]); */
3220 if (mode != BNX2X_RX_MODE_NONE)
3221 bnx2x_set_client_config(bp);
/* bnx2x_init_internal - program the storms' internal configuration.
 *
 * Writes the TSTORM common function config (RSS flags/mask), forces
 * the rx mode to NONE until link is up, and enables ethernet
 * statistics collection in the XSTORM, TSTORM and CSTORM.
 */
3224 static void bnx2x_init_internal(struct bnx2x *bp)
3226 int port = bp->port;
3227 struct tstorm_eth_function_common_config tstorm_config = {0};
3228 struct stats_indication_flags stats_flags = {0};
3231 tstorm_config.config_flags = MULTI_FLAGS;
3232 tstorm_config.rss_result_mask = MULTI_MASK;
3235 REG_WR(bp, BAR_TSTRORM_INTMEM +
3236 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
3237 (*(u32 *)&tstorm_config));
3239 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
3240 (*(u32 *)&tstorm_config)); */
3242 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3243 bnx2x_set_storm_rx_mode(bp);
/* Turn on ethernet statistics gathering in each relevant storm. */
3245 stats_flags.collect_eth = cpu_to_le32(1);
3247 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
3248 ((u32 *)&stats_flags)[0]);
3249 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
3250 ((u32 *)&stats_flags)[1]);
3252 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
3253 ((u32 *)&stats_flags)[0]);
3254 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
3255 ((u32 *)&stats_flags)[1]);
3257 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
3258 ((u32 *)&stats_flags)[0]);
3259 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
3260 ((u32 *)&stats_flags)[1]);
3262 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
3263 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
/* bnx2x_nic_init - top-level NIC software initialization.
 *
 * Initializes every fastpath status block, then runs the full init
 * sequence in order: default status block, coalescing, RX/TX/SP
 * rings, connection contexts, internal storm config, statistics,
 * indirection table, and finally enables interrupts.
 */
3266 static void bnx2x_nic_init(struct bnx2x *bp)
3270 for_each_queue(bp, i) {
3271 struct bnx2x_fastpath *fp = &bp->fp[i];
3273 fp->state = BNX2X_FP_STATE_CLOSED;
3274 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
3275 bp, fp->status_blk, i);
3277 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
3280 bnx2x_init_def_sb(bp, bp->def_status_blk,
3281 bp->def_status_blk_mapping, 0x10);
3282 bnx2x_update_coalesce(bp);
3283 bnx2x_init_rx_rings(bp);
3284 bnx2x_init_tx_ring(bp);
3285 bnx2x_init_sp_ring(bp);
3286 bnx2x_init_context(bp);
3287 bnx2x_init_internal(bp);
3288 bnx2x_init_stats(bp);
3289 bnx2x_init_ind_table(bp);
3290 bnx2x_int_enable(bp);
3294 /* end of nic init */
3297 * gzip service functions
/* bnx2x_gunzip_init - allocate resources for firmware decompression.
 *
 * Allocates a DMA-coherent output buffer, the zlib stream structure
 * and its inflate workspace.  On any failure, frees what was already
 * allocated and logs an error.  NOTE(review): the success return and
 * the error-path labels are elided in this excerpt.
 */
3300 static int bnx2x_gunzip_init(struct bnx2x *bp)
3302 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
3303 &bp->gunzip_mapping);
3304 if (bp->gunzip_buf == NULL)
3307 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3308 if (bp->strm == NULL)
3311 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3313 if (bp->strm->workspace == NULL)
/* Error path: release the DMA buffer and report. */
3323 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
3324 bp->gunzip_mapping);
3325 bp->gunzip_buf = NULL;
3328 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
3329 " uncompression\n", bp->dev->name);
/* bnx2x_gunzip_end - release the decompression resources allocated by
 * bnx2x_gunzip_init (workspace, stream, DMA buffer).
 * NOTE(review): the stream kfree/NULL lines are elided in this excerpt.
 */
3333 static void bnx2x_gunzip_end(struct bnx2x *bp)
3335 kfree(bp->strm->workspace);
3340 if (bp->gunzip_buf) {
3341 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
3342 bp->gunzip_mapping);
3343 bp->gunzip_buf = NULL;
/* bnx2x_gunzip - inflate a gzip-wrapped firmware image into
 * bp->gunzip_buf.
 *
 * Validates the gzip magic/method bytes, skips an optional embedded
 * file name (FNAME flag), then runs raw inflate (negative window bits
 * skip the zlib header) in a single Z_FINISH pass.  The output length
 * is stored in bp->gunzip_outlen as a dword count and is expected to
 * be 4-byte aligned.
 *
 * @zbuf: gzip-compressed input
 * @len:  length of @zbuf in bytes
 */
3347 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
3351 /* check gzip header */
3352 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
/* Skip the NUL-terminated original file name, if present. */
3359 if (zbuf[3] & FNAME)
3360 while ((zbuf[n++] != 0) && (n < len));
3362 bp->strm->next_in = zbuf + n;
3363 bp->strm->avail_in = len - n;
3364 bp->strm->next_out = bp->gunzip_buf;
3365 bp->strm->avail_out = FW_BUF_SIZE;
/* -MAX_WBITS selects raw deflate (no zlib header/trailer). */
3367 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3371 rc = zlib_inflate(bp->strm, Z_FINISH);
3372 if ((rc != Z_OK) && (rc != Z_STREAM_END))
3373 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
3374 bp->dev->name, bp->strm->msg);
3376 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3377 if (bp->gunzip_outlen & 0x3)
3378 printk(KERN_ERR PFX "%s: Firmware decompression error:"
3379 " gunzip_outlen (%d) not aligned\n",
3380 bp->dev->name, bp->gunzip_outlen);
3381 bp->gunzip_outlen >>= 2;
3383 zlib_inflateEnd(bp->strm);
3385 if (rc == Z_STREAM_END)
3391 /* nic load/unload */
3394 * general service functions
3397 /* send a NIG loopback debug packet */
/* bnx2x_lb_pckt - inject a NIG loopback debug packet.
 *
 * Writes two 3-word debug-packet records to NIG_REG_DEBUG_PACKET_LB:
 * first the Ethernet addresses word with SOP set (0x20), then a
 * non-IP protocol word with EOP set and eop_bvalid clear (0x10).
 * NOTE(review): the #ifdef lines selecting between REG_WR_DMAE and
 * REG_WR_IND appear elided in this excerpt — the two variants are
 * alternatives, not sequential writes; confirm against full source.
 */
3398 static void bnx2x_lb_pckt(struct bnx2x *bp)
3404 /* Ethernet source and destination addresses */
3406 wb_write[0] = 0x55555555;
3407 wb_write[1] = 0x55555555;
3408 wb_write[2] = 0x20; /* SOP */
3409 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3411 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
3412 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
3414 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
3417 /* NON-IP protocol */
3419 wb_write[0] = 0x09000000;
3420 wb_write[1] = 0x55555555;
3421 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
3422 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3424 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
3425 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
3426 /* EOP, eop_bvalid = 0 */
3427 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
3431 /* some of the internal memories
3432 * are not directly readable from the driver
3433 * to test them we send debug packets
/* bnx2x_int_mem_test - exercise internal memories that cannot be read
 * directly, by sending NIG loopback debug packets through the pipeline
 * and checking NIG/PRS packet counters.
 *
 * Part 1: one packet with CFC search credits at 0; part 2: ten packets,
 * then credits restored to verify the parser drains.  BRB/PRS (and
 * finally NIG) are reset and re-initialized between/after the parts.
 * "factor" scales the poll counts per chip revision.  NOTE(review):
 * numerous lines (loop bodies, waits, returns, braces) are elided in
 * this excerpt — treat the visible sequence as an outline only.
 */
3435 static int bnx2x_int_mem_test(struct bnx2x *bp)
/* Poll-count scaling presumably depends on chip revision (emulation/
 * FPGA vs ASIC) — the case bodies are elided here. */
3441 switch (CHIP_REV(bp)) {
3453 DP(NETIF_MSG_HW, "start part1\n");
3455 /* Disable inputs of parser neighbor blocks */
3456 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3457 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3458 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3459 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
3461 /* Write 0 to parser credits for CFC search request */
3462 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3464 /* send Ethernet packet */
3467 /* TODO do i reset NIG statistic? */
3468 /* Wait until NIG register shows 1 packet of size 0x10 */
3469 count = 1000 * factor;
3471 #ifdef BNX2X_DMAE_RD
3472 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3473 val = *bnx2x_sp(bp, wb_data[0]);
3475 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3476 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3485 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3489 /* Wait until PRS register shows 1 packet */
3490 count = 1000 * factor;
3492 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3501 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3505 /* Reset and init BRB, PRS */
3506 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
3508 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
3510 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3511 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3513 DP(NETIF_MSG_HW, "part2\n");
3515 /* Disable inputs of parser neighbor blocks */
3516 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3517 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3518 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3519 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
3521 /* Write 0 to parser credits for CFC search request */
3522 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3524 /* send 10 Ethernet packets */
3525 for (i = 0; i < 10; i++)
3528 /* Wait until NIG register shows 10 + 1
3529 packets of size 11*0x10 = 0xb0 */
3530 count = 1000 * factor;
3532 #ifdef BNX2X_DMAE_RD
3533 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3534 val = *bnx2x_sp(bp, wb_data[0]);
3536 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3537 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3546 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3550 /* Wait until PRS register shows 2 packets */
3551 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3553 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3555 /* Write 1 to parser credits for CFC search request */
3556 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3558 /* Wait until PRS register shows 3 packets */
3559 msleep(10 * factor);
3560 /* Wait until NIG register shows 1 packet of size 0x10 */
3561 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3563 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3565 /* clear NIG EOP FIFO */
3566 for (i = 0; i < 11; i++)
3567 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3568 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3570 BNX2X_ERR("clear of NIG failed\n");
3574 /* Reset and init BRB, PRS, NIG */
3575 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3577 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3579 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3580 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3583 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3586 /* Enable inputs of parser neighbor blocks */
3587 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3588 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3589 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3590 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
3592 DP(NETIF_MSG_HW, "done\n");
/* enable_blocks_attention - unmask attention interrupts for the HW
 * blocks by writing 0 (all unmasked) to each block's INT_MASK register.
 * Exceptions: PXP2 keeps mask 0x480000, PBF keeps bits 3,4 masked
 * (0x18), and the commented-out SEM/MISC masks are intentionally left
 * untouched.
 */
3597 static void enable_blocks_attention(struct bnx2x *bp)
3599 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3600 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3601 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3602 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3603 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3604 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3605 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3606 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3607 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3608 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3609 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3610 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3611 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3612 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3613 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3614 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3615 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3616 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3617 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3618 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3619 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3620 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3621 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
3622 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3623 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3624 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3625 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3626 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3627 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3628 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3629 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3630 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
/*
 * bnx2x_function_init - hardware initialization for one port.
 *
 * @bp:   driver instance (bp->port selects function 0 or 1)
 * @mode: bit 0 set requests the chip-common (one-time) init in
 *        addition to the per-port init
 *
 * First (when mode bit 0 is set) brings the shared blocks out of reset
 * and initializes them (MISC, PXP/PXP2, DMAE, the CMs/SDMs/SEMs, QM,
 * timers, doorbell queue, BRB, PRS, searcher, CDU, CFC, HC, AEU, NIG),
 * runs the internal-memory self test on first power-up, and enables
 * block attentions.  Then performs the per-port init: ILT/on-chip
 * address tables, per-port block init, PBF no-pause config, searcher
 * T2 table setup, and finally reads the MCP pulse/fw mailbox values.
 */
3633 static int bnx2x_function_init(struct bnx2x *bp, int mode)
3635 int func = bp->port;
3636 int port = func ? PORT1 : PORT0;
3642 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
3643 if ((func != 0) && (func != 1)) {
3644 BNX2X_ERR("BAD function number (%d)\n", func);
3648 bnx2x_gunzip_init(bp);
3650 if (mode & 0x1) { /* init common */
3651 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
3653 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
3655 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
3657 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
/* pulse the LCPLL control register (write 0x100 then 0) */
3659 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3661 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3663 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
3664 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
3668 if (CHIP_REV(bp) == CHIP_REV_Ax) {
3669 /* enable HW interrupt from PXP on USDM
3670 overflow bit 16 on INT_MASK_0 */
3671 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
/* big-endian host: set PXP2 request/read endianness swap modes */
3675 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3676 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3677 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3678 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3679 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3680 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
3682 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3683 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3684 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3685 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3686 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3691 REG_WR(bp, PRS_REG_NIC_MODE, 1);
/* ILT page sizes: 5 => presumably 4K pages -- TODO confirm encoding */
3694 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
3696 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3697 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3698 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3701 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
3703 /* let the HW do its magic ... */
3706 (can be moved up if we want to use the DMAE) */
3707 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3709 BNX2X_ERR("PXP2 CFG failed\n");
3713 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3715 BNX2X_ERR("PXP2 RD_INIT failed\n");
3719 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3720 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3722 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3724 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
3725 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
3726 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
3727 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
/* drain the SEM passive buffers (3 dwords each); values discarded */
3729 #ifdef BNX2X_DMAE_RD
3730 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3731 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3732 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3733 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3735 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
3736 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
3737 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
3738 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
3739 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
3740 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
3741 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
3742 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
3743 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
3744 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
3745 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
3746 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
3748 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
3749 /* soft reset pulse */
3750 REG_WR(bp, QM_REG_SOFT_RESET, 1);
3751 REG_WR(bp, QM_REG_SOFT_RESET, 0);
3754 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
3756 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
3757 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
3758 if (CHIP_REV(bp) == CHIP_REV_Ax) {
3759 /* enable hw interrupt from doorbell Q */
3760 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3763 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3765 if (CHIP_REV_IS_SLOW(bp)) {
3766 /* fix for emulation and FPGA for no pause */
3767 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
3768 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
3769 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
3770 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
3773 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3775 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
3776 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
3777 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
3778 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
/* zero the four STORM internal memories */
3780 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
3781 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
3782 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
3783 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
3785 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
3786 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
3787 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
3788 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
3791 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3793 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
3796 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
3797 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
3798 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
/* program placeholder RSS keys into the searcher while it is in reset */
3800 REG_WR(bp, SRC_REG_SOFT_RST, 1);
3801 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
3802 REG_WR(bp, i, 0xc0cac01a);
3803 /* TODO: replace with something meaningful */
3805 /* SRCH COMMON comes here */
3806 REG_WR(bp, SRC_REG_SOFT_RST, 0);
3808 if (sizeof(union cdu_context) != 1024) {
3809 /* we currently assume that a context is 1024 bytes */
3810 printk(KERN_ALERT PFX "please adjust the size of"
3811 " cdu_context(%ld)\n",
3812 (long)sizeof(union cdu_context));
/* CDU global params: 4 in bits 24+, 0 in bits 12+, context size 1024 */
3814 val = (4 << 24) + (0 << 12) + 1024;
3815 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
3816 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
3818 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
3819 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
3821 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
3822 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
3823 MISC_AEU_COMMON_END);
3824 /* RXPCS COMMON comes here */
3825 /* EMAC0 COMMON comes here */
3826 /* EMAC1 COMMON comes here */
3827 /* DBU COMMON comes here */
3828 /* DBG COMMON comes here */
3829 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
3831 if (CHIP_REV_IS_SLOW(bp))
3834 /* finish CFC init */
3835 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
3837 BNX2X_ERR("CFC LL_INIT failed\n");
3841 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
3843 BNX2X_ERR("CFC AC_INIT failed\n");
3847 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
3849 BNX2X_ERR("CFC CAM_INIT failed\n");
3853 REG_WR(bp, CFC_REG_DEBUG0, 0);
3855 /* read NIG statistic
3856 to see if this is our first up since powerup */
3857 #ifdef BNX2X_DMAE_RD
3858 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3859 val = *bnx2x_sp(bp, wb_data[0]);
3861 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3862 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3864 /* do internal memory self test */
3865 if ((val == 0) && bnx2x_int_mem_test(bp)) {
3866 BNX2X_ERR("internal mem selftest failed\n");
3870 /* clear PXP2 attentions */
3871 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
3873 enable_blocks_attention(bp);
3874 /* enable_blocks_parity(bp); */
3876 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
3877 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
3878 /* Fan failure is indicated by SPIO 5 */
3879 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3880 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3882 /* set to active low mode */
3883 val = REG_RD(bp, MISC_REG_SPIO_INT);
3884 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3885 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3886 REG_WR(bp, MISC_REG_SPIO_INT, val);
3888 /* enable interrupt to signal the IGU */
3889 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3890 val |= (1 << MISC_REGISTERS_SPIO_5);
3891 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3898 } /* end of common init */
3902 /* the phys address is shifted right 12 bits and has an added
3903 1=valid bit added to the 53rd bit
3904 then since this is a wide register(TM)
3905 we split it into two 32 bit writes
3907 #define RQ_ONCHIP_AT_PORT_SIZE 384
3908 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
3909 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
3910 #define PXP_ONE_ILT(x) ((x << 10) | x)
3912 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
3914 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
3916 /* Port PXP comes here */
3917 /* Port PXP2 comes here */
/* program the per-port ILT entries: context, timers, QM, searcher */
3922 i = func * RQ_ONCHIP_AT_PORT_SIZE;
3924 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
3925 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
3926 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3928 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
3929 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
3930 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
3931 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
3933 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
3939 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
3940 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
3941 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3942 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
3947 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
3948 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
3949 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3950 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
3955 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
3956 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
3957 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3958 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
3961 /* Port TCM comes here */
3962 /* Port UCM comes here */
3963 /* Port CCM comes here */
3964 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
3965 func ? XCM_PORT1_END : XCM_PORT0_END);
/* QM: 32 queues per port, 4KB base spacing, pointer table zeroed */
3971 for (i = 0; i < 32; i++) {
3972 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
3974 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
3976 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
3977 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
3980 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
3982 /* Port QM comes here */
3985 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
3986 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
3988 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
3989 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
3991 /* Port DQ comes here */
3992 /* Port BRB1 comes here */
3993 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
3994 func ? PRS_PORT1_END : PRS_PORT0_END);
3995 /* Port TSDM comes here */
3996 /* Port CSDM comes here */
3997 /* Port USDM comes here */
3998 /* Port XSDM comes here */
3999 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
4000 func ? TSEM_PORT1_END : TSEM_PORT0_END);
4001 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
4002 func ? USEM_PORT1_END : USEM_PORT0_END);
4003 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
4004 func ? CSEM_PORT1_END : CSEM_PORT0_END);
4005 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
4006 func ? XSEM_PORT1_END : XSEM_PORT0_END);
4007 /* Port UPB comes here */
4008 /* Port XSDM comes here */
4009 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
4010 func ? PBF_PORT1_END : PBF_PORT0_END);
4012 /* configure PBF to work without PAUSE mtu 9000 */
4013 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
4015 /* update threshold */
4016 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
4017 /* update init credit */
4018 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
/* pulse the PBF init bit for this port */
4021 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
4023 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
4026 /* tell the searcher where the T2 table is */
4027 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
4029 wb_write[0] = U64_LO(bp->t2_mapping);
4030 wb_write[1] = U64_HI(bp->t2_mapping);
4031 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
4032 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
4033 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
4034 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
4036 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
4037 /* Port SRCH comes here */
4039 /* Port CDU comes here */
4040 /* Port CFC comes here */
4041 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
4042 func ? HC_PORT1_END : HC_PORT0_END);
4043 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
4044 MISC_AEU_PORT0_START,
4045 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
4046 /* Port PXPCS comes here */
4047 /* Port EMAC0 comes here */
4048 /* Port EMAC1 comes here */
4049 /* Port DBU comes here */
4050 /* Port DBG comes here */
4051 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
4052 func ? NIG_PORT1_END : NIG_PORT0_END);
4053 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
4054 /* Port MCP comes here */
4055 /* Port DMAE comes here */
4057 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4058 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4059 /* add SPIO 5 to group 0 */
4060 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4061 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4062 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
4069 bnx2x__link_reset(bp);
4071 /* Reset PCIE errors for debug */
4072 REG_WR(bp, 0x2114, 0xffffffff);
4073 REG_WR(bp, 0x2120, 0xffffffff);
4074 REG_WR(bp, 0x2814, 0xffffffff);
4076 /* !!! move to init_values.h */
4077 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4078 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4079 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4080 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4082 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
4083 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
4084 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
4085 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
4087 bnx2x_gunzip_end(bp);
/* cache the MCP pulse sequence and fw mailbox param for this port */
4092 bp->fw_drv_pulse_wr_seq =
4093 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
4094 DRV_PULSE_SEQ_MASK);
4095 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
4096 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
4097 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
4105 /* send the MCP a request, block until there is a reply */
/*
 * bnx2x_fw_command - post a command to the MCP mailbox and poll for
 * the matching reply.
 *
 * @command: DRV_MSG_CODE_* command to send; a per-port sequence number
 *           is OR'ed in so the reply can be matched.
 *
 * Sleeps (msleep) while waiting, so this must be called from process
 * context.  On a sequence match the FW message code (masked with
 * FW_MSG_CODE_MASK) is returned; otherwise an error is logged.
 */
4106 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
4108 int port = bp->port;
4109 u32 seq = ++bp->fw_seq;
4112 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
4113 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
4115 /* let the FW do its magic ... */
4116 msleep(100); /* TBD */
4118 if (CHIP_REV_IS_SLOW(bp))
4121 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
4122 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
4124 /* is this a reply to our command? */
4125 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
4126 rc &= FW_MSG_CODE_MASK;
4130 BNX2X_ERR("FW failed to respond!\n");
/*
 * bnx2x_free_mem - release all driver memory allocated by
 * bnx2x_alloc_mem (mirror of that function, in reverse).
 *
 * Per-queue fastpath memory (status blocks, tx/rx descriptor and
 * completion rings) plus the default status block, slowpath buffer,
 * searcher tables, timers, QM queues and the slowpath SPQ ring.
 * BNX2X_PCI_FREE handles DMA-consistent memory; BNX2X_FREE handles
 * vmalloc'ed memory.  Both macros tolerate NULL (skip the free).
 */
4138 static void bnx2x_free_mem(struct bnx2x *bp)
4141 #define BNX2X_PCI_FREE(x, y, size) \
4144 pci_free_consistent(bp->pdev, size, x, y); \
4150 #define BNX2X_FREE(x) \
4161 for_each_queue(bp, i) {
/* status blk + the tx doorbell data appended right after it */
4164 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4165 bnx2x_fp(bp, i, status_blk_mapping),
4166 sizeof(struct host_status_block) +
4167 sizeof(struct eth_tx_db_data));
4169 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
4170 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4171 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4172 bnx2x_fp(bp, i, tx_desc_mapping),
4173 sizeof(struct eth_tx_bd) * NUM_TX_BD);
4175 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4176 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4177 bnx2x_fp(bp, i, rx_desc_mapping),
4178 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4180 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4181 bnx2x_fp(bp, i, rx_comp_mapping),
4182 sizeof(struct eth_fast_path_rx_cqe) *
4188 /* end of fastpath */
4190 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4191 (sizeof(struct host_def_status_block)));
4193 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4194 (sizeof(struct bnx2x_slowpath)));
/* searcher T1/T2, timers and QM sizes must match bnx2x_alloc_mem */
4197 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4198 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4199 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4200 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
/* NOTE(review): freed with PAGE_SIZE but allocated with BCM_PAGE_SIZE
 * in bnx2x_alloc_mem -- confirm the two are identical on all arches */
4202 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
4204 #undef BNX2X_PCI_FREE
/*
 * bnx2x_alloc_mem - allocate all driver memory: per-queue fastpath
 * structures (status blocks, tx/rx rings), default status block,
 * slowpath buffer, searcher T1/T2 tables, timers, QM queues and the
 * slowpath SPQ ring.
 *
 * Returns 0 on success; any allocation failure jumps to alloc_mem_err
 * (cleanup path not visible here).  BNX2X_PCI_ALLOC gets zeroed
 * DMA-consistent memory, BNX2X_ALLOC gets zeroed vmalloc memory.
 */
4208 static int bnx2x_alloc_mem(struct bnx2x *bp)
4211 #define BNX2X_PCI_ALLOC(x, y, size) \
4213 x = pci_alloc_consistent(bp->pdev, size, y); \
4215 goto alloc_mem_err; \
4216 memset(x, 0, size); \
4219 #define BNX2X_ALLOC(x, size) \
4221 x = vmalloc(size); \
4223 goto alloc_mem_err; \
4224 memset(x, 0, size); \
4230 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
4232 for_each_queue(bp, i) {
4233 bnx2x_fp(bp, i, bp) = bp;
/* status block; the tx doorbell data lives immediately after it */
4236 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4237 &bnx2x_fp(bp, i, status_blk_mapping),
4238 sizeof(struct host_status_block) +
4239 sizeof(struct eth_tx_db_data));
4241 bnx2x_fp(bp, i, hw_tx_prods) =
4242 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
4244 bnx2x_fp(bp, i, tx_prods_mapping) =
4245 bnx2x_fp(bp, i, status_blk_mapping) +
4246 sizeof(struct host_status_block);
4248 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
4249 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4250 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4251 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4252 &bnx2x_fp(bp, i, tx_desc_mapping),
4253 sizeof(struct eth_tx_bd) * NUM_TX_BD);
4255 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4256 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4257 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4258 &bnx2x_fp(bp, i, rx_desc_mapping),
4259 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4261 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4262 &bnx2x_fp(bp, i, rx_comp_mapping),
4263 sizeof(struct eth_fast_path_rx_cqe) *
4267 /* end of fastpath */
4269 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4270 sizeof(struct host_def_status_block));
4272 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4273 sizeof(struct bnx2x_slowpath));
/* searcher T1 table: one 64-byte entry per line */
4276 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4279 for (i = 0; i < 64*1024; i += 64) {
4280 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
/* NOTE(review): "+ 3" produces a misaligned u64 store inside an
 * already-zeroed buffer -- looks like a typo'd offset; confirm
 * against the searcher T1 line layout */
4281 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
4284 /* allocate searcher T2 table
4285 we allocate 1/4 of alloc num for T2
4286 (which is not entered into the ILT) */
4287 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
/* chain the T2 free-list: last u64 of each line points to the next */
4290 for (i = 0; i < 16*1024; i += 64)
4291 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4293 /* now fixup the last line in the block to point to the next block */
4294 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
4296 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
4297 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4299 /* QM queues (128*MAX_CONN) */
4300 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4303 /* Slow path ring */
4304 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4312 #undef BNX2X_PCI_ALLOC
/*
 * bnx2x_free_tx_skbs - drop every pending TX packet on all queues.
 *
 * Walks each fastpath queue from tx_pkt_cons to tx_pkt_prod, letting
 * bnx2x_free_tx_pkt() unmap and free the skb for each outstanding
 * packet.  Used on teardown; assumes the TX path is already quiesced.
 */
4316 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
4320 for_each_queue(bp, i) {
4321 struct bnx2x_fastpath *fp = &bp->fp[i];
4323 u16 bd_cons = fp->tx_bd_cons;
4324 u16 sw_prod = fp->tx_pkt_prod;
4325 u16 sw_cons = fp->tx_pkt_cons;
4327 BUG_TRAP(fp->tx_buf_ring != NULL);
4329 while (sw_cons != sw_prod) {
4330 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
/*
 * bnx2x_free_rx_skbs - release every posted RX buffer on all queues.
 *
 * For each RX BD that holds an skb, unmap its DMA buffer and free it
 * (the skb free itself is in an elided line).  Used on teardown.
 */
4336 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
4340 for_each_queue(bp, j) {
4341 struct bnx2x_fastpath *fp = &bp->fp[j];
4343 BUG_TRAP(fp->rx_buf_ring != NULL);
4345 for (i = 0; i < NUM_RX_BD; i++) {
4346 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
4347 struct sk_buff *skb = rx_buf->skb;
4352 pci_unmap_single(bp->pdev,
4353 pci_unmap_addr(rx_buf, mapping),
4354 bp->rx_buf_use_size,
4355 PCI_DMA_FROMDEVICE);
/* Free all driver-held skbs: pending TX packets first, then RX buffers. */
4363 static void bnx2x_free_skbs(struct bnx2x *bp)
4365 bnx2x_free_tx_skbs(bp);
4366 bnx2x_free_rx_skbs(bp);
/*
 * bnx2x_free_msix_irqs - release all MSI-X vectors.
 *
 * Vector 0 is the slowpath interrupt (handler gets bp->dev); vectors
 * 1..n are the per-queue fastpath interrupts (handler gets &bp->fp[i]).
 * Warns if a fastpath is freed while its state is not CLOSED.
 */
4369 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
4373 free_irq(bp->msix_table[0].vector, bp->dev);
4374 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
4375 bp->msix_table[0].vector);
4377 for_each_queue(bp, i) {
4378 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
4379 "state(%x)\n", i, bp->msix_table[i + 1].vector,
4380 bnx2x_fp(bp, i, state));
4382 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
4383 BNX2X_ERR("IRQ of fp #%d being freed while "
4384 "state != closed\n", i);
4386 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
/*
 * bnx2x_free_irq - release the device interrupt(s).
 *
 * MSI-X mode: free all vectors, disable MSI-X and clear the flag.
 * Otherwise: free the single shared INTx IRQ.
 */
4391 static void bnx2x_free_irq(struct bnx2x *bp)
4394 if (bp->flags & USING_MSIX_FLAG) {
4396 bnx2x_free_msix_irqs(bp);
4397 pci_disable_msix(bp->pdev);
4399 bp->flags &= ~USING_MSIX_FLAG;
4402 free_irq(bp->pdev->irq, bp->dev);
/*
 * bnx2x_enable_msix - try to enable MSI-X with num_queues+1 vectors
 * (entry 0 = slowpath, entries 1..n = fastpath queues).
 *
 * Sets USING_MSIX_FLAG on success; on failure logs and returns an
 * error (exact return path elided) so the caller falls back to INTx.
 */
4405 static int bnx2x_enable_msix(struct bnx2x *bp)
4410 bp->msix_table[0].entry = 0;
4411 for_each_queue(bp, i)
4412 bp->msix_table[i + 1].entry = i + 1;
4414 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
4415 bp->num_queues + 1)){
4416 BNX2X_LOG("failed to enable MSI-X\n");
4421 bp->flags |= USING_MSIX_FLAG;
/*
 * bnx2x_req_msix_irqs - request every MSI-X vector enabled by
 * bnx2x_enable_msix.
 *
 * Vector 0 -> slowpath handler (cookie bp->dev); vectors 1..n ->
 * fastpath handler (cookie &bp->fp[i]).  On a fastpath request
 * failure, all previously requested vectors are released via
 * bnx2x_free_msix_irqs.  Each successfully wired queue is marked
 * BNX2X_FP_STATE_IRQ.
 */
4428 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
4433 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
4434 bp->dev->name, bp->dev);
4437 BNX2X_ERR("request sp irq failed\n");
4441 for_each_queue(bp, i) {
4442 rc = request_irq(bp->msix_table[i + 1].vector,
4443 bnx2x_msix_fp_int, 0,
4444 bp->dev->name, &bp->fp[i]);
4447 BNX2X_ERR("request fp #%d irq failed "
4449 bnx2x_free_msix_irqs(bp);
4453 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
/*
 * bnx2x_req_irq - request the single shared (INTx) interrupt and mark
 * fastpath 0 as wired.  Returns the request_irq() result.
 */
4461 static int bnx2x_req_irq(struct bnx2x *bp)
4464 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
4465 IRQF_SHARED, bp->dev->name, bp->dev);
4467 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
4474 * Init service functions
/*
 * bnx2x_set_mac_addr - program the device CAM with the unicast MAC
 * (entry 0) and the broadcast address (entry 1), then post the
 * SET_MAC ramrod pointing at the config in slowpath memory.
 */
4477 static void bnx2x_set_mac_addr(struct bnx2x *bp)
4479 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
/* CAM partitioning (per the comment below):
 * unicasts 0-31:port0 32-63:port1, multicast 64-127:port0 128-191:port1 */
4482 * unicasts 0-31:port0 32-63:port1
4483 * multicast 64-127:port0 128-191:port1
4485 config->hdr.length_6b = 2;
/* NOTE(review): per the table above port1 unicasts start at 32, but
 * this programs offset 31 -- off-by-one suspect; confirm vs HSI */
4486 config->hdr.offset = bp->port ? 31 : 0;
4487 config->hdr.reserved0 = 0;
4488 config->hdr.reserved1 = 0;
/* CAM entry 0: the station MAC, stored as three byte-swapped u16s */
4491 config->config_table[0].cam_entry.msb_mac_addr =
4492 swab16(*(u16 *)&bp->dev->dev_addr[0]);
4493 config->config_table[0].cam_entry.middle_mac_addr =
4494 swab16(*(u16 *)&bp->dev->dev_addr[2]);
4495 config->config_table[0].cam_entry.lsb_mac_addr =
4496 swab16(*(u16 *)&bp->dev->dev_addr[4]);
4497 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
4498 config->config_table[0].target_table_entry.flags = 0;
4499 config->config_table[0].target_table_entry.client_id = 0;
4500 config->config_table[0].target_table_entry.vlan_id = 0;
4502 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
4503 config->config_table[0].cam_entry.msb_mac_addr,
4504 config->config_table[0].cam_entry.middle_mac_addr,
4505 config->config_table[0].cam_entry.lsb_mac_addr);
/* CAM entry 1: the broadcast address ff:ff:ff:ff:ff:ff */
4508 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
4509 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
4510 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
4511 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
4512 config->config_table[1].target_table_entry.flags =
4513 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4514 config->config_table[1].target_table_entry.client_id = 0;
4515 config->config_table[1].target_table_entry.vlan_id = 0;
4517 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4518 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4519 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/*
 * bnx2x_wait_ramrod - wait for a ramrod completion to flip *state_p
 * to the requested state.
 *
 * @state:   value *state_p must reach
 * @idx:     fastpath index the completion arrives on (0 = default)
 * @state_p: state variable updated by bnx2x_sp_event()
 * @poll:    nonzero -> actively poll the RX completion ring(s)
 *           instead of relying on the interrupt path
 *
 * Returns 0 when the state is reached; logs and returns an error
 * (timeout path partially elided) otherwise.
 */
4522 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4523 int *state_p, int poll)
4525 /* can take a while if any port is running */
4528 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4529 poll ? "polling" : "waiting", state, idx);
4536 bnx2x_rx_int(bp->fp, 10);
4537 /* If index is different from 0
4538 * The reply for some commands will
4539 * be on the none default queue
4542 bnx2x_rx_int(&bp->fp[idx], 10);
4545 mb(); /* state is changed by bnx2x_sp_event()*/
4547 if (*state_p == state)
4556 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4557 poll ? "polling" : "waiting", state, idx);
/*
 * bnx2x_setup_leading - bring up the leading (default) connection:
 * reset the IGU state for the default status block, post the
 * PORT_SETUP ramrod and wait until bp->state becomes OPEN.
 */
4562 static int bnx2x_setup_leading(struct bnx2x *bp)
4565 /* reset IGU state */
4566 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4569 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4571 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
/*
 * bnx2x_setup_multi - bring up a non-default connection @index:
 * reset its IGU state, mark the fastpath OPENING, post the
 * CLIENT_SETUP ramrod and wait for the fastpath state to become OPEN.
 */
4575 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
4578 /* reset IGU state */
4579 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4582 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
4583 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
4585 /* Wait for completion */
4586 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4587 &(bp->fp[index].state), 0);
/* forward declarations needed before bnx2x_nic_load */
4592 static int bnx2x_poll(struct napi_struct *napi, int budget);
4593 static void bnx2x_set_rx_mode(struct net_device *dev);
/*
 * bnx2x_nic_load - bring the NIC up.
 *
 * @req_irq: nonzero when called from bnx2x_open (IRQs must be
 *           requested and the TX queue started); zero on an internal
 *           reload where the queue is only re-woken.
 *
 * Sequence: negotiate LOAD_REQ with the MCP (first loader also does
 * the common HW init), pick the queue count and try MSI-X (fallback
 * to INTx), allocate memory, request IRQs, init the hardware, report
 * LOAD_DONE, enable NAPI, post the leading + multi setup ramrods, set
 * the MAC, init the PHY, start the TX path and the periodic timer.
 * Error labels unwind in reverse (several elided in this view).
 */
4595 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
4600 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
4602 /* Send LOAD_REQUEST command to MCP.
4603 Returns the type of LOAD command: if it is the
4604 first port to be initialized common blocks should be
4605 initialized, otherwise - not.
4608 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
4610 BNX2X_ERR("MCP response failure, unloading\n");
4613 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
4614 BNX2X_ERR("MCP refused load request, unloading\n");
4615 return -EBUSY; /* other port in diagnostic mode */
4618 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
4621 /* if we can't use msix we only need one fp,
4622 * so try to enable msix with the requested number of fp's
4623 * and fallback to inta with one fp
4629 if ((use_multi > 1) && (use_multi <= 16))
4630 /* user requested number */
4631 bp->num_queues = use_multi;
4632 else if (use_multi == 1)
4633 bp->num_queues = num_online_cpus();
4637 if (bnx2x_enable_msix(bp)) {
4638 /* failed to enable msix */
4641 BNX2X_ERR("Multi requested but failed"
4642 " to enable MSI-X\n");
4647 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
4649 if (bnx2x_alloc_mem(bp))
4653 if (bp->flags & USING_MSIX_FLAG) {
4654 if (bnx2x_req_msix_irqs(bp)) {
4655 pci_disable_msix(bp->pdev);
4660 if (bnx2x_req_irq(bp)) {
4661 BNX2X_ERR("IRQ request failed, aborting\n");
4667 for_each_queue(bp, i)
4668 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
4673 if (bnx2x_function_init(bp,
4674 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
4675 BNX2X_ERR("HW init failed, aborting\n");
4680 atomic_set(&bp->intr_sem, 0);
4683 /* Setup NIC internals and enable interrupts */
4686 /* Send LOAD_DONE command to MCP */
4688 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
4690 BNX2X_ERR("MCP response failure, unloading\n");
4691 goto load_int_disable;
4695 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
4697 /* Enable Rx interrupt handling before sending the ramrod
4698 as it's completed on Rx FP queue */
4699 for_each_queue(bp, i)
4700 napi_enable(&bnx2x_fp(bp, i, napi));
4702 if (bnx2x_setup_leading(bp))
4703 goto load_stop_netif;
4705 for_each_nondefault_queue(bp, i)
4706 if (bnx2x_setup_multi(bp, i))
4707 goto load_stop_netif;
4709 bnx2x_set_mac_addr(bp);
4711 bnx2x_initial_phy_init(bp);
4713 /* Start fast path */
4714 if (req_irq) { /* IRQ is only requested from bnx2x_open */
4715 netif_start_queue(bp->dev);
4716 if (bp->flags & USING_MSIX_FLAG)
4717 printk(KERN_INFO PFX "%s: using MSI-X\n",
4720 /* Otherwise Tx queue should be only reenabled */
4721 } else if (netif_running(bp->dev)) {
4722 netif_wake_queue(bp->dev);
4723 bnx2x_set_rx_mode(bp->dev);
4726 /* start the timer */
4727 mod_timer(&bp->timer, jiffies + bp->current_interval);
/* error unwind labels (partially elided) */
4732 for_each_queue(bp, i)
4733 napi_disable(&bnx2x_fp(bp, i, napi));
4736 bnx2x_int_disable_sync(bp);
4738 bnx2x_free_skbs(bp);
4744 /* TBD we really need to reset the chip
4745 if we want to recover from this */
/*
 * bnx2x_reset_chip - quiesce and reset the HW for this port.
 *
 * @reset_code: FW unload code; when it is DRV_UNLOAD_COMMON the
 *              chip-common blocks are also put back into reset.
 *
 * Blocks new RX traffic into the BRB, masks IGU/AEU for the port and
 * invalidates this port's slice of the PXP2 on-chip address table.
 */
4750 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
4752 int port = bp->port;
4758 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
4760 /* Do not rcv packets to BRB */
4761 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
4762 /* Do not direct rcv packets that are not for MCP to the BRB */
4763 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
4764 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
4766 /* Configure IGU and AEU */
4767 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
4768 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
4770 /* TODO: Close Doorbell port? */
/* wipe this port's on-chip address-table entries */
4777 base = port * RQ_ONCHIP_AT_PORT_SIZE;
4778 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
4780 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
4782 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
4783 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
4787 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
/* last driver out: put the common blocks back into reset */
4789 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4791 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
/*
 * bnx2x_stop_multi - tear down non-default connection @index:
 * post the HALT ramrod and wait (polling) for HALTED, then post
 * CFC_DEL and wait for CLOSED.  Returns the wait result (nonzero on
 * timeout).
 */
4796 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
4801 /* halt the connection */
4802 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
4803 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
4806 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
4807 &(bp->fp[index].state), 1);
4808 if (rc) /* timeout */
4811 /* delete cfc entry */
4812 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
4814 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
4815 &(bp->fp[index].state), 1);
/*
 * bnx2x_stop_leading - tear down the leading (default) connection.
 *
 * Posts the HALT ramrod on fastpath 0 and polls for HALTED, then
 * posts PORT_DELETE and polls the default status block producer for
 * its completion.  A timeout here is only logged -- the chip is about
 * to be reset anyway.  On exit bp->state is CLOSING_WAIT4_UNLOAD and
 * fp[0] is CLOSED.
 */
4820 static void bnx2x_stop_leading(struct bnx2x *bp)
4822 u16 dsb_sp_prod_idx;
4823 /* if the other port is handling traffic,
4824 this can take a lot of time */
4829 /* Send HALT ramrod */
4830 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
4831 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
4833 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
4834 &(bp->fp[0].state), 1))
/* snapshot the DSB slowpath producer so we can detect the
 * PORT_DEL completion below */
4837 dsb_sp_prod_idx = *bp->dsb_sp_prod;
4839 /* Send PORT_DELETE ramrod */
4840 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
4842 /* Wait for completion to arrive on default status block
4843 we are going to reset the chip anyway
4844 so there is not much to do if this times out
4846 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
4851 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
4852 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
4853 *bp->dsb_sp_prod, dsb_sp_prod_idx);
4855 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
4856 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
4860 static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
4865 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
4867 del_timer_sync(&bp->timer);
4869 bp->rx_mode = BNX2X_RX_MODE_NONE;
4870 bnx2x_set_storm_rx_mode(bp);
4872 if (netif_running(bp->dev)) {
4873 netif_tx_disable(bp->dev);
4874 bp->dev->trans_start = jiffies; /* prevent tx timeout */
4877 /* Wait until all fast path tasks complete */
4878 for_each_queue(bp, i) {
4879 struct bnx2x_fastpath *fp = &bp->fp[i];
4882 while (bnx2x_has_work(fp) && (timeout--))
4885 BNX2X_ERR("timeout waiting for queue[%d]\n", i);
4888 /* Wait until stat ramrod returns and all SP tasks complete */
4890 while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
4894 for_each_queue(bp, i)
4895 napi_disable(&bnx2x_fp(bp, i, napi));
4896 /* Disable interrupts after Tx and Rx are disabled on stack level */
4897 bnx2x_int_disable_sync(bp);
4899 if (bp->flags & NO_WOL_FLAG)
4900 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
4903 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
/* Program the MAC into the EMAC match registers so the magic packet
 * can be recognized while the host sleeps, then close all connections,
 * reset the link and the chip, and report UNLOAD_DONE to the MCP. */
4904 u8 *mac_addr = bp->dev->dev_addr;
4905 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
4906 EMAC_MODE_ACPI_RCVD);
4908 EMAC_WR(EMAC_REG_EMAC_MODE, val);
/* MAC_MATCH holds bytes 0-1; MAC_MATCH+4 holds bytes 2-5 */
4910 val = (mac_addr[0] << 8) | mac_addr[1];
4911 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
4913 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4914 (mac_addr[4] << 8) | mac_addr[5];
4915 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
4917 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
4920 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
4922 /* Close multi and leading connections */
4923 for_each_nondefault_queue(bp, i)
4924 if (bnx2x_stop_multi(bp, i))
4927 bnx2x_stop_leading(bp);
/* bnx2x_stop_leading() reports success via bp->state/fp[0].state */
4928 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
4929 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
4930 DP(NETIF_MSG_IFDOWN, "failed to close leading properly!"
4931 "state 0x%x fp[0].state 0x%x",
4932 bp->state, bp->fp[0].state);
4936 bnx2x__link_reset(bp);
/* With an MCP present the reply selects COMMON vs PORT reset;
 * without one, fall back to a COMMON reset code. */
4939 reset_code = bnx2x_fw_command(bp, reset_code);
4941 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
4947 /* Reset the chip */
4948 bnx2x_reset_chip(bp, reset_code);
4950 /* Report UNLOAD_DONE to MCP */
4952 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
4954 /* Free SKBs and driver internals */
4955 bnx2x_free_skbs(bp);
4958 bp->state = BNX2X_STATE_CLOSED;
4960 netif_carrier_off(bp->dev);
4965 /* end of nic load/unload */
4970 * Init service functions
/* bnx2x_link_settings_supported - derive the ethtool "supported" mask.
 *
 * Builds bp->supported from the external PHY type found in NVRAM
 * (SerDes path for 1G switch config, XGXS path for 10G), reads the PHY
 * address from the NIG, then prunes the mask down to what the NVRAM
 * speed_cap_mask actually allows.
 * NOTE(review): extract is missing the SWITCH_CFG_1G case label, the
 * ext_phy_type declarations, break statements and several SUPPORTED_*
 * continuation lines (Autoneg/Pause) - restore before building.
 */
4973 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
4975 int port = bp->port;
4978 switch (switch_cfg) {
4980 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
4983 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
4984 switch (ext_phy_type) {
4985 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
4986 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
4989 bp->supported |= (SUPPORTED_10baseT_Half |
4990 SUPPORTED_10baseT_Full |
4991 SUPPORTED_100baseT_Half |
4992 SUPPORTED_100baseT_Full |
4993 SUPPORTED_1000baseT_Full |
4994 SUPPORTED_2500baseX_Full |
4995 SUPPORTED_TP | SUPPORTED_FIBRE |
4998 SUPPORTED_Asym_Pause);
5001 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
5002 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
5005 bp->supported |= (SUPPORTED_10baseT_Half |
5006 SUPPORTED_10baseT_Full |
5007 SUPPORTED_100baseT_Half |
5008 SUPPORTED_100baseT_Full |
5009 SUPPORTED_1000baseT_Full |
5010 SUPPORTED_TP | SUPPORTED_FIBRE |
5013 SUPPORTED_Asym_Pause);
/* Unknown SerDes PHY in NVRAM - configuration error */
5017 BNX2X_ERR("NVRAM config error. "
5018 "BAD SerDes ext_phy_config 0x%x\n",
5019 bp->link_params.ext_phy_config);
5023 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
5025 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
5028 case SWITCH_CFG_10G:
5029 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
5032 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5033 switch (ext_phy_type) {
5034 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5035 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
5038 bp->supported |= (SUPPORTED_10baseT_Half |
5039 SUPPORTED_10baseT_Full |
5040 SUPPORTED_100baseT_Half |
5041 SUPPORTED_100baseT_Full |
5042 SUPPORTED_1000baseT_Full |
5043 SUPPORTED_2500baseX_Full |
5044 SUPPORTED_10000baseT_Full |
5045 SUPPORTED_TP | SUPPORTED_FIBRE |
5048 SUPPORTED_Asym_Pause);
5051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5052 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
5055 bp->supported |= (SUPPORTED_10000baseT_Full |
5058 SUPPORTED_Asym_Pause);
5061 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5062 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
5065 bp->supported |= (SUPPORTED_10000baseT_Full |
5066 SUPPORTED_1000baseT_Full |
5070 SUPPORTED_Asym_Pause);
5073 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5074 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
5077 bp->supported |= (SUPPORTED_10000baseT_Full |
5078 SUPPORTED_1000baseT_Full |
5082 SUPPORTED_Asym_Pause);
5085 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5086 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
5089 bp->supported |= (SUPPORTED_10000baseT_Full |
5090 SUPPORTED_2500baseX_Full |
5091 SUPPORTED_1000baseT_Full |
5095 SUPPORTED_Asym_Pause);
5098 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5099 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
5102 bp->supported |= (SUPPORTED_10000baseT_Full |
5106 SUPPORTED_Asym_Pause);
5109 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
5110 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
5111 bp->link_params.ext_phy_config);
5115 BNX2X_ERR("NVRAM config error. "
5116 "BAD XGXS ext_phy_config 0x%x\n",
5117 bp->link_params.ext_phy_config);
5121 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
5123 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
5128 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
5132 bp->link_params.phy_addr = bp->phy_addr;
5134 /* mask what we support according to speed_cap_mask */
5135 if (!(bp->link_params.speed_cap_mask &
5136 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
5137 bp->supported &= ~SUPPORTED_10baseT_Half;
5139 if (!(bp->link_params.speed_cap_mask &
5140 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
5141 bp->supported &= ~SUPPORTED_10baseT_Full;
5143 if (!(bp->link_params.speed_cap_mask &
5144 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
5145 bp->supported &= ~SUPPORTED_100baseT_Half;
5147 if (!(bp->link_params.speed_cap_mask &
5148 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
5149 bp->supported &= ~SUPPORTED_100baseT_Full;
5151 if (!(bp->link_params.speed_cap_mask &
5152 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
5153 bp->supported &= ~(SUPPORTED_1000baseT_Half |
5154 SUPPORTED_1000baseT_Full);
5156 if (!(bp->link_params.speed_cap_mask &
5157 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
5158 bp->supported &= ~SUPPORTED_2500baseX_Full;
5160 if (!(bp->link_params.speed_cap_mask &
5161 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
5162 bp->supported &= ~SUPPORTED_10000baseT_Full;
5164 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
/* bnx2x_link_settings_requested - translate the NVRAM link_config into
 * requested speed/duplex/advertising.
 *
 * Each speed case checks the corresponding bit in bp->supported (already
 * pruned by bnx2x_link_settings_supported()) and logs an NVRAM config
 * error when the requested speed is not supported.
 * NOTE(review): extract is missing else branches, break statements and
 * several ADVERTISED_* continuation lines - restore before building.
 */
5167 static void bnx2x_link_settings_requested(struct bnx2x *bp)
5169 bp->link_params.req_duplex = DUPLEX_FULL;
5171 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
5172 case PORT_FEATURE_LINK_SPEED_AUTO:
5173 if (bp->supported & SUPPORTED_Autoneg) {
5174 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5175 bp->advertising = bp->supported;
/* No autoneg support: 8705/8706 PHYs can still run forced 10G */
5178 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5180 if ((ext_phy_type ==
5181 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5183 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
5184 /* force 10G, no AN */
5185 bp->link_params.req_line_speed = SPEED_10000;
5187 (ADVERTISED_10000baseT_Full |
5191 BNX2X_ERR("NVRAM config error. "
5192 "Invalid link_config 0x%x"
5193 " Autoneg not supported\n",
5199 case PORT_FEATURE_LINK_SPEED_10M_FULL:
5200 if (bp->supported & SUPPORTED_10baseT_Full) {
5201 bp->link_params.req_line_speed = SPEED_10;
5202 bp->advertising = (ADVERTISED_10baseT_Full |
5205 BNX2X_ERR("NVRAM config error. "
5206 "Invalid link_config 0x%x"
5207 " speed_cap_mask 0x%x\n",
5209 bp->link_params.speed_cap_mask);
5214 case PORT_FEATURE_LINK_SPEED_10M_HALF:
5215 if (bp->supported & SUPPORTED_10baseT_Half) {
5216 bp->link_params.req_line_speed = SPEED_10;
5217 bp->link_params.req_duplex = DUPLEX_HALF;
5218 bp->advertising = (ADVERTISED_10baseT_Half |
5221 BNX2X_ERR("NVRAM config error. "
5222 "Invalid link_config 0x%x"
5223 " speed_cap_mask 0x%x\n",
5225 bp->link_params.speed_cap_mask);
5230 case PORT_FEATURE_LINK_SPEED_100M_FULL:
5231 if (bp->supported & SUPPORTED_100baseT_Full) {
5232 bp->link_params.req_line_speed = SPEED_100;
5233 bp->advertising = (ADVERTISED_100baseT_Full |
5236 BNX2X_ERR("NVRAM config error. "
5237 "Invalid link_config 0x%x"
5238 " speed_cap_mask 0x%x\n",
5240 bp->link_params.speed_cap_mask);
5245 case PORT_FEATURE_LINK_SPEED_100M_HALF:
5246 if (bp->supported & SUPPORTED_100baseT_Half) {
5247 bp->link_params.req_line_speed = SPEED_100;
5248 bp->link_params.req_duplex = DUPLEX_HALF;
5249 bp->advertising = (ADVERTISED_100baseT_Half |
5252 BNX2X_ERR("NVRAM config error. "
5253 "Invalid link_config 0x%x"
5254 " speed_cap_mask 0x%x\n",
5256 bp->link_params.speed_cap_mask);
5261 case PORT_FEATURE_LINK_SPEED_1G:
5262 if (bp->supported & SUPPORTED_1000baseT_Full) {
5263 bp->link_params.req_line_speed = SPEED_1000;
5264 bp->advertising = (ADVERTISED_1000baseT_Full |
5267 BNX2X_ERR("NVRAM config error. "
5268 "Invalid link_config 0x%x"
5269 " speed_cap_mask 0x%x\n",
5271 bp->link_params.speed_cap_mask);
5276 case PORT_FEATURE_LINK_SPEED_2_5G:
5277 if (bp->supported & SUPPORTED_2500baseX_Full) {
5278 bp->link_params.req_line_speed = SPEED_2500;
5279 bp->advertising = (ADVERTISED_2500baseX_Full |
5282 BNX2X_ERR("NVRAM config error. "
5283 "Invalid link_config 0x%x"
5284 " speed_cap_mask 0x%x\n",
5286 bp->link_params.speed_cap_mask);
5291 case PORT_FEATURE_LINK_SPEED_10G_CX4:
5292 case PORT_FEATURE_LINK_SPEED_10G_KX4:
5293 case PORT_FEATURE_LINK_SPEED_10G_KR:
5294 if (bp->supported & SUPPORTED_10000baseT_Full) {
5295 bp->link_params.req_line_speed = SPEED_10000;
5296 bp->advertising = (ADVERTISED_10000baseT_Full |
5299 BNX2X_ERR("NVRAM config error. "
5300 "Invalid link_config 0x%x"
5301 " speed_cap_mask 0x%x\n",
5303 bp->link_params.speed_cap_mask);
/* Unknown speed selection: fall back to autoneg with everything we support */
5309 BNX2X_ERR("NVRAM config error. "
5310 "BAD link speed link_config 0x%x\n",
5312 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5313 bp->advertising = bp->supported;
5317 bp->link_params.req_flow_ctrl = (bp->link_config &
5318 PORT_FEATURE_FLOW_CONTROL_MASK);
5319 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
5320 (!bp->supported & SUPPORTED_Autoneg))
5321 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
5323 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
5324 " advertising 0x%x\n",
5325 bp->link_params.req_line_speed,
5326 bp->link_params.req_duplex,
5327 bp->link_params.req_flow_ctrl, bp->advertising);
/* bnx2x_get_hwinfo - read chip id, shmem configuration, MAC address,
 * bootcode version and flash size from the hardware/NVRAM.
 * NOTE(review): extract is missing goto targets, blank lines and some
 * assignments (e.g. bp->link_config, id |= bond bits).
 */
5330 static void bnx2x_get_hwinfo(struct bnx2x *bp)
5332 u32 val, val2, val3, val4, id;
5333 int port = bp->port;
5335 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5336 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
5338 /* Get the chip revision id and number. */
5339 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5340 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5341 id = ((val & 0xffff) << 16);
5342 val = REG_RD(bp, MISC_REG_CHIP_REV);
5343 id |= ((val & 0xf) << 12);
5344 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5345 id |= ((val & 0xff) << 4);
/* NOTE(review): result of this read is discarded - the bond_id bits
 * (0-3) are never folded into 'id'.  Looks like "val =" was lost;
 * verify against the original ("id |= (val & 0xf)" should follow). */
5346 REG_RD(bp, MISC_REG_BOND_ID);
5349 BNX2X_DEV_INFO("chip ID is %x\n", id);
5351 bp->link_params.bp = bp;
/* MCP is considered absent unless shmem sits at its fixed address */
5353 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
5354 BNX2X_DEV_INFO("MCP not active\n");
5359 val = SHMEM_RD(bp, validity_map[port]);
5360 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5361 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5362 BNX2X_ERR("BAD MCP validity signature\n");
5364 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
5365 DRV_MSG_SEQ_NUMBER_MASK);
5367 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
5368 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
5369 bp->link_params.serdes_config =
5370 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
5371 bp->link_params.lane_config =
5372 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
5373 bp->link_params.ext_phy_config =
5375 dev_info.port_hw_config[port].external_phy_config);
5376 bp->link_params.speed_cap_mask =
5378 dev_info.port_hw_config[port].speed_capability_mask);
5381 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
5383 BNX2X_DEV_INFO("serdes_config (%08x) lane_config (%08x)\n"
5384 KERN_INFO " ext_phy_config (%08x) speed_cap_mask (%08x)"
5385 " link_config (%08x)\n",
5386 bp->link_params.serdes_config,
5387 bp->link_params.lane_config,
5388 bp->link_params.ext_phy_config,
5389 bp->link_params.speed_cap_mask,
5392 bp->link_params.switch_cfg = (bp->link_config &
5393 PORT_FEATURE_CONNECTED_SWITCH_MASK);
5394 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
5396 bnx2x_link_settings_requested(bp);
/* MAC: upper 2 bytes in mac_upper, lower 4 bytes in mac_lower */
5398 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
5399 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
5400 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
5401 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
5402 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
5403 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
5404 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
5405 bp->dev->dev_addr[5] = (u8)(val & 0xff);
5406 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
5407 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
5411 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
5412 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
5413 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
5414 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
5416 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
5417 val, val2, val3, val4);
5421 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
5422 BNX2X_DEV_INFO("bc_ver %X\n", val);
5423 if (val < BNX2X_BC_VER) {
5424 /* for now only warn
5425 * later we might need to enforce this */
5426 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
5427 " please upgrade BC\n", BNX2X_BC_VER, val);
5433 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5434 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
5435 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5436 bp->flash_size, bp->flash_size);
5440 set_mac: /* only supposed to happen on emulation/FPGA */
5441 BNX2X_ERR("warning rendom MAC workaround active\n");
5442 random_ether_addr(bp->dev->dev_addr);
5443 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
5448 * ethtool service functions
5451 /* All ethtool functions called with rtnl_lock */
/* bnx2x_get_settings - ethtool .get_settings handler.
 *
 * Reports live link parameters when the carrier is up, otherwise the
 * requested (configured) ones; derives the port type from the external
 * PHY for 10G switch configs.
 * NOTE(review): extract is missing else/break lines and the trailing
 * "return 0;".
 */
5453 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5455 struct bnx2x *bp = netdev_priv(dev);
5457 cmd->supported = bp->supported;
5458 cmd->advertising = bp->advertising;
5460 if (netif_carrier_ok(dev)) {
5461 cmd->speed = bp->link_vars.line_speed;
5462 cmd->duplex = bp->link_vars.duplex;
5464 cmd->speed = bp->link_params.req_line_speed;
5465 cmd->duplex = bp->link_params.req_duplex;
5468 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
5470 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5472 switch (ext_phy_type) {
5473 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5474 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5475 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5476 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5477 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5478 cmd->port = PORT_FIBRE;
5481 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5482 cmd->port = PORT_TP;
5485 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
5486 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
5487 bp->link_params.ext_phy_config);
5491 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
5492 bp->link_params.ext_phy_config);
/* 1G (SerDes) configs are always reported as twisted pair */
5496 cmd->port = PORT_TP;
5498 cmd->phy_address = bp->phy_addr;
5499 cmd->transceiver = XCVR_INTERNAL;
5501 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
5502 cmd->autoneg = AUTONEG_ENABLE;
5504 cmd->autoneg = AUTONEG_DISABLE;
5509 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
5510 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
5511 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
5512 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
5513 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
5514 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
5515 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
/* bnx2x_set_settings - ethtool .set_settings handler (head).
 * Validates the requested autoneg/speed/duplex against bp->supported.
 */
5520 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5522 struct bnx2x *bp = netdev_priv(dev);
5525 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
5526 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
5527 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
5528 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
5529 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
5530 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
5531 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
5533 if (cmd->autoneg == AUTONEG_ENABLE) {
5534 if (!(bp->supported & SUPPORTED_Autoneg)) {
5535 DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
/* Autoneg path: advertise whatever the user asked for that we support.
 * Forced-speed path: validate each speed/duplex pair against
 * bp->supported before committing it to link_params.
 * NOTE(review): extract is missing the "case SPEED_*:" labels, return
 * statements and closing braces of the forced-speed switch.
 */
5539 /* advertise the requested speed and duplex if supported */
5540 cmd->advertising &= bp->supported;
5542 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5543 bp->link_params.req_duplex = DUPLEX_FULL;
5544 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
5546 } else { /* forced speed */
5547 /* advertise the requested speed and duplex if supported */
5548 switch (cmd->speed) {
5550 if (cmd->duplex == DUPLEX_FULL) {
5551 if (!(bp->supported &
5552 SUPPORTED_10baseT_Full)) {
5554 "10M full not supported\n");
5558 advertising = (ADVERTISED_10baseT_Full |
5561 if (!(bp->supported &
5562 SUPPORTED_10baseT_Half)) {
5564 "10M half not supported\n");
5568 advertising = (ADVERTISED_10baseT_Half |
5574 if (cmd->duplex == DUPLEX_FULL) {
5575 if (!(bp->supported &
5576 SUPPORTED_100baseT_Full)) {
5578 "100M full not supported\n");
5582 advertising = (ADVERTISED_100baseT_Full |
5585 if (!(bp->supported &
5586 SUPPORTED_100baseT_Half)) {
5588 "100M half not supported\n");
5592 advertising = (ADVERTISED_100baseT_Half |
5598 if (cmd->duplex != DUPLEX_FULL) {
5599 DP(NETIF_MSG_LINK, "1G half not supported\n");
5603 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
5604 DP(NETIF_MSG_LINK, "1G full not supported\n");
5608 advertising = (ADVERTISED_1000baseT_Full |
5613 if (cmd->duplex != DUPLEX_FULL) {
5615 "2.5G half not supported\n");
5619 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
5621 "2.5G full not supported\n");
5625 advertising = (ADVERTISED_2500baseX_Full |
5630 if (cmd->duplex != DUPLEX_FULL) {
5631 DP(NETIF_MSG_LINK, "10G half not supported\n");
5635 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
5636 DP(NETIF_MSG_LINK, "10G full not supported\n");
5640 advertising = (ADVERTISED_10000baseT_Full |
5645 DP(NETIF_MSG_LINK, "Unsupported speed\n");
/* Validation passed: commit the forced settings */
5649 bp->link_params.req_line_speed = cmd->speed;
5650 bp->link_params.req_duplex = cmd->duplex;
5651 bp->advertising = advertising;
5654 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
5655 DP_LEVEL " req_duplex %d advertising 0x%x\n",
5656 bp->link_params.req_line_speed, bp->link_params.req_duplex,
/* Restart stats and (re)initialize the link with the new settings */
5659 bnx2x_stop_stats(bp);
5665 #define PHY_FW_VER_LEN 10
/* bnx2x_get_drvinfo - ethtool .get_drvinfo handler.
 * Fills driver name/version, a composite firmware version string
 * (ucode + bootcode + optional external PHY fw) and dump sizes.
 */
5667 static void bnx2x_get_drvinfo(struct net_device *dev,
5668 struct ethtool_drvinfo *info)
5670 struct bnx2x *bp = netdev_priv(dev);
5671 char phy_fw_ver[PHY_FW_VER_LEN];
5673 strcpy(info->driver, DRV_MODULE_NAME);
5674 strcpy(info->version, DRV_MODULE_VERSION);
/* PHY access is serialized with the hardware lock */
5676 phy_fw_ver[0] = '\0';
5677 bnx2x_phy_hw_lock(bp);
5678 bnx2x_get_ext_phy_fw_version(&bp->link_params,
5679 (bp->state != BNX2X_STATE_CLOSED),
5680 phy_fw_ver, PHY_FW_VER_LEN);
5681 bnx2x_phy_hw_unlock(bp);
5683 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
5684 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
5685 BCM_5710_FW_REVISION_VERSION,
5686 BCM_5710_FW_COMPILE_FLAGS, bp->bc_ver,
5687 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
5688 strcpy(info->bus_info, pci_name(bp->pdev));
5689 info->n_stats = BNX2X_NUM_STATS;
5690 info->testinfo_len = BNX2X_NUM_TESTS;
5691 info->eedump_len = bp->flash_size;
5692 info->regdump_len = 0;
/* bnx2x_get_wol - ethtool .get_wol handler.
 * Reports WAKE_MAGIC capability unless NO_WOL_FLAG is set.
 * NOTE(review): the branch clearing supported/wolopts for the
 * NO_WOL_FLAG case and the "if (bp->wol)" test are missing from this
 * extract.
 */
5695 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5697 struct bnx2x *bp = netdev_priv(dev);
5699 if (bp->flags & NO_WOL_FLAG) {
5703 wol->supported = WAKE_MAGIC;
5705 wol->wolopts = WAKE_MAGIC;
5709 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* bnx2x_set_wol - ethtool .set_wol handler.
 * Only WAKE_MAGIC is accepted; other option bits are rejected.
 * NOTE(review): the -EINVAL returns and the bp->wol assignments are
 * missing from this extract.
 */
5712 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5714 struct bnx2x *bp = netdev_priv(dev);
5716 if (wol->wolopts & ~WAKE_MAGIC)
5719 if (wol->wolopts & WAKE_MAGIC) {
5720 if (bp->flags & NO_WOL_FLAG)
5730 static u32 bnx2x_get_msglevel(struct net_device *dev)
5732 struct bnx2x *bp = netdev_priv(dev);
5734 return bp->msglevel;
5737 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
5739 struct bnx2x *bp = netdev_priv(dev);
5741 if (capable(CAP_NET_ADMIN))
5742 bp->msglevel = level;
/* bnx2x_nway_reset - ethtool .nway_reset handler.
 * Restarts link negotiation when the device is open.
 * NOTE(review): the return statements and the link/stats re-init calls
 * after bnx2x_stop_stats() are missing from this extract.
 */
5745 static int bnx2x_nway_reset(struct net_device *dev)
5747 struct bnx2x *bp = netdev_priv(dev);
5749 if (bp->state != BNX2X_STATE_OPEN) {
5750 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
5754 bnx2x_stop_stats(bp);
5760 static int bnx2x_get_eeprom_len(struct net_device *dev)
5762 struct bnx2x *bp = netdev_priv(dev);
5764 return bp->flash_size;
/* bnx2x_acquire_nvram_lock - request the per-port NVRAM SW arbitration.
 * Polls the ARB bit until granted or timeout; returns -EBUSY style error
 * on failure.
 * NOTE(review): variable declarations, the udelay in the poll loop and
 * the return statements are missing from this extract.
 */
5767 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
5769 int port = bp->port;
5773 /* adjust timeout for emulation/FPGA */
5774 count = NVRAM_TIMEOUT_COUNT;
5775 if (CHIP_REV_IS_SLOW(bp))
5778 /* request access to nvram interface */
5779 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
5780 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
5782 for (i = 0; i < count*10; i++) {
5783 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
5784 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
5790 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
5791 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
/* bnx2x_release_nvram_lock - release the per-port NVRAM SW arbitration.
 * Mirror of bnx2x_acquire_nvram_lock(): clears the request bit and polls
 * until the ARB bit drops.
 * NOTE(review): declarations, poll delay and return statements are
 * missing from this extract.
 */
5798 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
5800 int port = bp->port;
5804 /* adjust timeout for emulation/FPGA */
5805 count = NVRAM_TIMEOUT_COUNT;
5806 if (CHIP_REV_IS_SLOW(bp))
5809 /* relinquish nvram interface */
5810 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
5811 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
5813 for (i = 0; i < count*10; i++) {
5814 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
5815 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
5821 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
5822 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
5829 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
5833 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5835 /* enable both bits, even on read */
5836 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5837 (val | MCPR_NVM_ACCESS_ENABLE_EN |
5838 MCPR_NVM_ACCESS_ENABLE_WR_EN));
5841 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
5845 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5847 /* disable both bits, even after read */
5848 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5849 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
5850 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
/* bnx2x_nvram_read_dword - issue one 32-bit NVRAM read and poll for DONE.
 * The result is byte-swapped to big-endian because ethtool presents the
 * eeprom as a byte array.
 * NOTE(review): rc/i/count declarations, the poll delay and the return
 * paths (including storing val into *ret_val) are missing from this
 * extract.
 */
5853 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
5859 /* build the command word */
5860 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
5862 /* need to clear DONE bit separately */
5863 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5865 /* address of the NVRAM to read from */
5866 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
5867 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5869 /* issue a read command */
5870 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5872 /* adjust timeout for emulation/FPGA */
5873 count = NVRAM_TIMEOUT_COUNT;
5874 if (CHIP_REV_IS_SLOW(bp))
5877 /* wait for completion */
5880 for (i = 0; i < count; i++) {
5882 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
5884 if (val & MCPR_NVM_COMMAND_DONE) {
5885 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
5886 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
5887 /* we read nvram data in cpu order
5888 * but ethtool sees it as an array of bytes
5889 * converting to big-endian will do the work */
5890 val = cpu_to_be32(val);
/* bnx2x_nvram_read - read buf_size bytes from NVRAM at offset.
 * offset and buf_size must be dword-aligned and within the flash.
 * Takes the NVRAM lock, streams dwords with FIRST/LAST command flags,
 * then releases the lock.
 * NOTE(review): the cmd_flags update between the loop and the LAST read
 * is missing from this extract.
 */
5900 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
5907 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
5909 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
5914 if (offset + buf_size > bp->flash_size) {
5915 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
5916 " buf_size (0x%x) > flash_size (0x%x)\n",
5917 offset, buf_size, bp->flash_size);
5921 /* request access to nvram interface */
5922 rc = bnx2x_acquire_nvram_lock(bp);
5926 /* enable access to nvram interface */
5927 bnx2x_enable_nvram_access(bp);
5929 /* read the first word(s) */
5930 cmd_flags = MCPR_NVM_COMMAND_FIRST;
5931 while ((buf_size > sizeof(u32)) && (rc == 0)) {
5932 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
5933 memcpy(ret_buf, &val, 4);
5935 /* advance to the next dword */
5936 offset += sizeof(u32);
5937 ret_buf += sizeof(u32);
5938 buf_size -= sizeof(u32);
/* final dword carries the LAST flag to close the NVRAM transaction */
5943 cmd_flags |= MCPR_NVM_COMMAND_LAST;
5944 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
5945 memcpy(ret_buf, &val, 4);
5948 /* disable access to nvram interface */
5949 bnx2x_disable_nvram_access(bp);
5950 bnx2x_release_nvram_lock(bp);
/* bnx2x_get_eeprom - ethtool .get_eeprom handler; thin wrapper around
 * bnx2x_nvram_read().  Range checking is done by the ethtool core.
 */
5955 static int bnx2x_get_eeprom(struct net_device *dev,
5956 struct ethtool_eeprom *eeprom, u8 *eebuf)
5958 struct bnx2x *bp = netdev_priv(dev);
5961 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
5962 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
5963 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
5964 eeprom->len, eeprom->len);
5966 /* parameters already validated in ethtool_get_eeprom */
5968 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
/* bnx2x_nvram_write_dword - issue one 32-bit NVRAM write and poll DONE.
 * Caller supplies FIRST/LAST flags; DOIT and WR are added here.
 * NOTE(review): rc/i/count declarations, the poll delay and the return
 * statements are missing from this extract.
 */
5973 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
5978 /* build the command word */
5979 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
5981 /* need to clear DONE bit separately */
5982 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5984 /* write the data */
5985 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
5987 /* address of the NVRAM to write to */
5988 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
5989 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5991 /* issue the write command */
5992 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5994 /* adjust timeout for emulation/FPGA */
5995 count = NVRAM_TIMEOUT_COUNT;
5996 if (CHIP_REV_IS_SLOW(bp))
5999 /* wait for completion */
6001 for (i = 0; i < count; i++) {
6003 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
6004 if (val & MCPR_NVM_COMMAND_DONE) {
/* Bit offset of the byte at 'offset' within its containing dword.
 * Fix: parenthesize the macro argument so expressions with operators of
 * lower precedence than '&' (e.g. BYTE_OFFSET(a | b)) expand correctly.
 */
#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))
/* bnx2x_nvram_write1 - write a single byte to NVRAM.
 * Implements a read-modify-write of the containing aligned dword: read
 * it (in ethtool byte order), splice in the new byte, convert back to
 * cpu order and write it out in one FIRST|LAST transaction.
 * NOTE(review): the rc checks after the read and the final return are
 * missing from this extract.
 */
6015 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
6023 if (offset + buf_size > bp->flash_size) {
6024 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
6025 " buf_size (0x%x) > flash_size (0x%x)\n",
6026 offset, buf_size, bp->flash_size);
6030 /* request access to nvram interface */
6031 rc = bnx2x_acquire_nvram_lock(bp);
6035 /* enable access to nvram interface */
6036 bnx2x_enable_nvram_access(bp);
6038 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
6039 align_offset = (offset & ~0x03);
6040 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
6043 val &= ~(0xff << BYTE_OFFSET(offset));
6044 val |= (*data_buf << BYTE_OFFSET(offset));
6046 /* nvram data is returned as an array of bytes
6047 * convert it back to cpu order */
6048 val = be32_to_cpu(val);
6050 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
6052 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
6056 /* disable access to nvram interface */
6057 bnx2x_disable_nvram_access(bp);
6058 bnx2x_release_nvram_lock(bp);
/* bnx2x_nvram_write - write buf_size bytes to NVRAM at offset.
 * Single-byte writes go through bnx2x_nvram_write1(); otherwise offset
 * and size must be dword-aligned.  FIRST/LAST flags bracket each NVRAM
 * page as the write streams dwords.
 * NOTE(review): the cmd_flags reset inside the loop and the final return
 * are missing from this extract.
 */
6063 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
6071 if (buf_size == 1) { /* ethtool */
6072 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
6075 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
6077 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
6082 if (offset + buf_size > bp->flash_size) {
6083 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
6084 " buf_size (0x%x) > flash_size (0x%x)\n",
6085 offset, buf_size, bp->flash_size);
6089 /* request access to nvram interface */
6090 rc = bnx2x_acquire_nvram_lock(bp);
6094 /* enable access to nvram interface */
6095 bnx2x_enable_nvram_access(bp);
6098 cmd_flags = MCPR_NVM_COMMAND_FIRST;
6099 while ((written_so_far < buf_size) && (rc == 0)) {
6100 if (written_so_far == (buf_size - sizeof(u32)))
6101 cmd_flags |= MCPR_NVM_COMMAND_LAST;
6102 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
6103 cmd_flags |= MCPR_NVM_COMMAND_LAST;
6104 else if ((offset % NVRAM_PAGE_SIZE) == 0)
6105 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
6107 memcpy(&val, data_buf, 4);
6108 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
6110 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
6112 /* advance to the next dword */
6113 offset += sizeof(u32);
6114 data_buf += sizeof(u32);
6115 written_so_far += sizeof(u32);
6119 /* disable access to nvram interface */
6120 bnx2x_disable_nvram_access(bp);
6121 bnx2x_release_nvram_lock(bp);
/* bnx2x_set_eeprom - ethtool .set_eeprom handler.
 * The PHY magic (0x00504859, "PHY") routes the buffer to an external
 * PHY firmware download (under the phy hw lock, with a link reset and
 * re-init around it); anything else is a plain NVRAM write.
 */
6126 static int bnx2x_set_eeprom(struct net_device *dev,
6127 struct ethtool_eeprom *eeprom, u8 *eebuf)
6129 struct bnx2x *bp = netdev_priv(dev);
6132 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
6133 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
6134 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
6135 eeprom->len, eeprom->len);
6137 /* parameters already validated in ethtool_set_eeprom */
6139 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
6140 if (eeprom->magic == 0x00504859) {
6142 bnx2x_phy_hw_lock(bp);
6143 rc = bnx2x_flash_download(bp, bp->port,
6144 bp->link_params.ext_phy_config,
6145 (bp->state != BNX2X_STATE_CLOSED),
6146 eebuf, eeprom->len);
/* Reset and re-init the link so the new PHY firmware takes effect */
6147 rc |= bnx2x_link_reset(&bp->link_params,
6149 rc |= bnx2x_phy_init(&bp->link_params,
6151 bnx2x_phy_hw_unlock(bp);
6154 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6159 static int bnx2x_get_coalesce(struct net_device *dev,
6160 struct ethtool_coalesce *coal)
6162 struct bnx2x *bp = netdev_priv(dev);
6164 memset(coal, 0, sizeof(struct ethtool_coalesce));
6166 coal->rx_coalesce_usecs = bp->rx_ticks;
6167 coal->tx_coalesce_usecs = bp->tx_ticks;
6168 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6173 static int bnx2x_set_coalesce(struct net_device *dev,
6174 struct ethtool_coalesce *coal)
6176 struct bnx2x *bp = netdev_priv(dev);
6178 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6179 if (bp->rx_ticks > 3000)
6180 bp->rx_ticks = 3000;
6182 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6183 if (bp->tx_ticks > 0x3000)
6184 bp->tx_ticks = 0x3000;
6186 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6187 if (bp->stats_ticks > 0xffff00)
6188 bp->stats_ticks = 0xffff00;
6189 bp->stats_ticks &= 0xffff00;
6191 if (netif_running(bp->dev))
6192 bnx2x_update_coalesce(bp);
6197 static void bnx2x_get_ringparam(struct net_device *dev,
6198 struct ethtool_ringparam *ering)
6200 struct bnx2x *bp = netdev_priv(dev);
6202 ering->rx_max_pending = MAX_RX_AVAIL;
6203 ering->rx_mini_max_pending = 0;
6204 ering->rx_jumbo_max_pending = 0;
6206 ering->rx_pending = bp->rx_ring_size;
6207 ering->rx_mini_pending = 0;
6208 ering->rx_jumbo_pending = 0;
6210 ering->tx_max_pending = MAX_TX_AVAIL;
6211 ering->tx_pending = bp->tx_ring_size;
6214 static int bnx2x_set_ringparam(struct net_device *dev,
6215 struct ethtool_ringparam *ering)
6217 struct bnx2x *bp = netdev_priv(dev);
6219 if ((ering->rx_pending > MAX_RX_AVAIL) ||
6220 (ering->tx_pending > MAX_TX_AVAIL) ||
6221 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
6224 bp->rx_ring_size = ering->rx_pending;
6225 bp->tx_ring_size = ering->tx_pending;
6227 if (netif_running(bp->dev)) {
6228 bnx2x_nic_unload(bp, 0);
6229 bnx2x_nic_load(bp, 0);
/* bnx2x_get_pauseparam - ethtool .get_pauseparam handler.
 * autoneg is reported only when both flow control and line speed are in
 * AUTO mode; rx/tx pause reflect the negotiated link_vars.
 * NOTE(review): the FLOW_CTRL_RX / FLOW_CTRL_TX comparison continuation
 * lines are missing from this extract.
 */
6235 static void bnx2x_get_pauseparam(struct net_device *dev,
6236 struct ethtool_pauseparam *epause)
6238 struct bnx2x *bp = netdev_priv(dev);
6240 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
6241 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
6243 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
6245 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
6248 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
6249 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
6250 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
/* bnx2x_set_pauseparam - ethtool .set_pauseparam handler.
 * Builds req_flow_ctrl from the rx/tx pause flags; AUTO is only kept
 * when autoneg is requested, supported and line speed is AUTO.
 * NOTE(review): the -EINVAL return and the stats/link re-init after
 * bnx2x_stop_stats() are missing from this extract.
 */
6253 static int bnx2x_set_pauseparam(struct net_device *dev,
6254 struct ethtool_pauseparam *epause)
6256 struct bnx2x *bp = netdev_priv(dev);
6258 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
6259 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
6260 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
/* start from AUTO (0x80...) and OR in the explicit rx/tx requests */
6262 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
6264 if (epause->rx_pause)
6265 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
6267 if (epause->tx_pause)
6268 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
6270 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
6271 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
6273 if (epause->autoneg) {
6274 if (!(bp->supported & SUPPORTED_Autoneg)) {
6275 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
6279 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
6280 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
6284 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
6285 bnx2x_stop_stats(bp);
/* ethtool .get_rx_csum: report whether RX checksum offload is enabled. */
6291 static u32 bnx2x_get_rx_csum(struct net_device *dev)
6293 struct bnx2x *bp = netdev_priv(dev);
/* ethtool .set_rx_csum: enable/disable RX checksum offload. */
6298 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
6300 struct bnx2x *bp = netdev_priv(dev);
/* ethtool .set_tso: toggle the TSO/TSO-ECN feature bits on the netdev. */
6306 static int bnx2x_set_tso(struct net_device *dev, u32 data)
6309 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
6311 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
/* Names of the self-test cases reported via ETH_SS_TEST; must stay in
 * sync with BNX2X_NUM_TESTS and the order used in bnx2x_self_test().
 */
6316 char string[ETH_GSTRING_LEN];
6317 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
6318 { "MC Errors (online)" }
/* ethtool .self_test_count: number of self-test result slots. */
6321 static int bnx2x_self_test_count(struct net_device *dev)
6323 return BNX2X_NUM_TESTS;
/* ethtool .self_test: run the (online-only) microcode assert check.
 * Statistics are paused around the test and the previous stats state
 * is restored afterwards.
 */
6326 static void bnx2x_self_test(struct net_device *dev,
6327 struct ethtool_test *etest, u64 *buf)
6329 struct bnx2x *bp = netdev_priv(dev);
6332 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
/* Tests can only run on an operational device. */
6334 if (bp->state != BNX2X_STATE_OPEN) {
6335 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
6339 stats_state = bp->stats_state;
6340 bnx2x_stop_stats(bp);
/* Non-zero means a storm processor asserted -> test failed. */
6342 if (bnx2x_mc_assert(bp) != 0) {
6344 etest->flags |= ETH_TEST_FL_FAILED;
6347 #ifdef BNX2X_EXTRA_DEBUG
6348 bnx2x_panic_dump(bp);
6350 bp->stats_state = stats_state;
/* Names of the ethtool statistics (ETH_SS_STATS). Order must match
 * bnx2x_stats_offset_arr[] and bnx2x_stats_len_arr[] below, and the
 * total count must equal BNX2X_NUM_STATS.
 */
6354 char string[ETH_GSTRING_LEN];
6355 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
6357 { "rx_error_bytes"},
6359 { "tx_error_bytes"},
6360 { "rx_ucast_packets"},
6361 { "rx_mcast_packets"},
6362 { "rx_bcast_packets"},
6363 { "tx_ucast_packets"},
6364 { "tx_mcast_packets"},
6365 { "tx_bcast_packets"},
6366 { "tx_mac_errors"}, /* 10 */
6367 { "tx_carrier_errors"},
6369 { "rx_align_errors"},
6370 { "tx_single_collisions"},
6371 { "tx_multi_collisions"},
6373 { "tx_excess_collisions"},
6374 { "tx_late_collisions"},
6375 { "tx_total_collisions"},
6376 { "rx_fragments"}, /* 20 */
6378 { "rx_undersize_packets"},
6379 { "rx_oversize_packets"},
6381 { "rx_xoff_frames"},
6383 { "tx_xoff_frames"},
6384 { "rx_mac_ctrl_frames"},
6385 { "rx_filtered_packets"},
6386 { "rx_discards"}, /* 30 */
/* Offset of a bnx2x_eth_stats member expressed in 32-bit words, for
 * indexing the hardware stats block as a u32 array.
 */
6392 #define STATS_OFFSET32(offset_name) \
6393 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
/* 32-bit word offset of each counter inside struct bnx2x_eth_stats.
 * Parallel to bnx2x_stats_str_arr[]: entry i is the source for the
 * stat named at index i.
 */
6395 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
6396 STATS_OFFSET32(total_bytes_received_hi),
6397 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6398 STATS_OFFSET32(total_bytes_transmitted_hi),
6399 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6400 STATS_OFFSET32(total_unicast_packets_received_hi),
6401 STATS_OFFSET32(total_multicast_packets_received_hi),
6402 STATS_OFFSET32(total_broadcast_packets_received_hi),
6403 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
6404 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
6405 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
6406 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
6407 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6408 STATS_OFFSET32(crc_receive_errors),
6409 STATS_OFFSET32(alignment_errors),
6410 STATS_OFFSET32(single_collision_transmit_frames),
6411 STATS_OFFSET32(multiple_collision_transmit_frames),
6412 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6413 STATS_OFFSET32(excessive_collision_frames),
6414 STATS_OFFSET32(late_collision_frames),
6415 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
6416 STATS_OFFSET32(runt_packets_received), /* 20 */
6417 STATS_OFFSET32(jabber_packets_received),
6418 STATS_OFFSET32(error_runt_packets_received),
6419 STATS_OFFSET32(error_jabber_packets_received),
6420 STATS_OFFSET32(pause_xon_frames_received),
6421 STATS_OFFSET32(pause_xoff_frames_received),
6422 STATS_OFFSET32(pause_xon_frames_transmitted),
6423 STATS_OFFSET32(pause_xoff_frames_transmitted),
6424 STATS_OFFSET32(control_frames_received),
6425 STATS_OFFSET32(mac_filter_discard),
6426 STATS_OFFSET32(no_buff_discard), /* 30 */
6427 STATS_OFFSET32(brb_discard),
6428 STATS_OFFSET32(brb_truncate_discard),
6429 STATS_OFFSET32(xxoverflow_discard)
/* Byte width of each counter: 8 = 64-bit (hi/lo pair), 4 = 32-bit,
 * 0 = counter skipped on this configuration. Parallel to
 * bnx2x_stats_str_arr[] / bnx2x_stats_offset_arr[].
 */
6432 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
6433 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
6434 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
6435 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* ethtool .get_strings: copy the stat or self-test name tables into
 * the caller's buffer depending on the requested string set.
 */
6439 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6441 switch (stringset) {
6443 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
6447 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
/* ethtool .get_stats_count: number of counters exported. */
6452 static int bnx2x_get_stats_count(struct net_device *dev)
6454 return BNX2X_NUM_STATS;
/* ethtool .get_ethtool_stats: walk the counter tables and copy each
 * counter out of the hardware stats block, widening 32-bit counters
 * and combining hi/lo halves of 64-bit ones.
 */
6457 static void bnx2x_get_ethtool_stats(struct net_device *dev,
6458 struct ethtool_stats *stats, u64 *buf)
6460 struct bnx2x *bp = netdev_priv(dev);
/* View the stats block as an array of u32 words; offsets in
 * bnx2x_stats_offset_arr[] are in words (see STATS_OFFSET32).
 */
6461 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
6464 for (i = 0; i < BNX2X_NUM_STATS; i++) {
6465 if (bnx2x_stats_len_arr[i] == 0) {
6466 /* skip this counter */
6474 if (bnx2x_stats_len_arr[i] == 4) {
6475 /* 4-byte counter */
6476 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
6479 /* 8-byte counter */
6480 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
6481 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
/* ethtool .phys_id: blink the port LED to identify the adapter.
 * Alternates LED on/off every 500 ms for `data` seconds, then restores
 * the operational LED state if the link is up.
 */
6485 static int bnx2x_phys_id(struct net_device *dev, u32 data)
6487 struct bnx2x *bp = netdev_priv(dev);
/* Two half-periods per second of requested blink time. */
6493 for (i = 0; i < (data * 2); i++) {
6495 bnx2x_set_led(bp, bp->port, LED_MODE_OPER, SPEED_1000,
6496 bp->link_params.hw_led_mode,
6497 bp->link_params.chip_id);
6499 bnx2x_set_led(bp, bp->port, LED_MODE_OFF, 0,
6500 bp->link_params.hw_led_mode,
6501 bp->link_params.chip_id);
/* Allow the user to interrupt a long blink with a signal. */
6503 msleep_interruptible(500);
6504 if (signal_pending(current))
/* Restore normal LED behaviour for the current link speed. */
6508 if (bp->link_vars.link_up)
6509 bnx2x_set_led(bp, bp->port, LED_MODE_OPER,
6510 bp->link_vars.line_speed,
6511 bp->link_params.hw_led_mode,
6512 bp->link_params.chip_id);
/* ethtool entry points for the driver.
 * NOTE(review): this table is never modified after init — could be
 * declared `static const struct ethtool_ops`; confirm against the
 * kernel version's expectations before changing.
 */
6517 static struct ethtool_ops bnx2x_ethtool_ops = {
6518 .get_settings = bnx2x_get_settings,
6519 .set_settings = bnx2x_set_settings,
6520 .get_drvinfo = bnx2x_get_drvinfo,
6521 .get_wol = bnx2x_get_wol,
6522 .set_wol = bnx2x_set_wol,
6523 .get_msglevel = bnx2x_get_msglevel,
6524 .set_msglevel = bnx2x_set_msglevel,
6525 .nway_reset = bnx2x_nway_reset,
6526 .get_link = ethtool_op_get_link,
6527 .get_eeprom_len = bnx2x_get_eeprom_len,
6528 .get_eeprom = bnx2x_get_eeprom,
6529 .set_eeprom = bnx2x_set_eeprom,
6530 .get_coalesce = bnx2x_get_coalesce,
6531 .set_coalesce = bnx2x_set_coalesce,
6532 .get_ringparam = bnx2x_get_ringparam,
6533 .set_ringparam = bnx2x_set_ringparam,
6534 .get_pauseparam = bnx2x_get_pauseparam,
6535 .set_pauseparam = bnx2x_set_pauseparam,
6536 .get_rx_csum = bnx2x_get_rx_csum,
6537 .set_rx_csum = bnx2x_set_rx_csum,
6538 .get_tx_csum = ethtool_op_get_tx_csum,
6539 .set_tx_csum = ethtool_op_set_tx_csum,
6540 .get_sg = ethtool_op_get_sg,
6541 .set_sg = ethtool_op_set_sg,
6542 .get_tso = ethtool_op_get_tso,
6543 .set_tso = bnx2x_set_tso,
6544 .self_test_count = bnx2x_self_test_count,
6545 .self_test = bnx2x_self_test,
6546 .get_strings = bnx2x_get_strings,
6547 .phys_id = bnx2x_phys_id,
6548 .get_stats_count = bnx2x_get_stats_count,
6549 .get_ethtool_stats = bnx2x_get_ethtool_stats
6552 /* end of ethtool_ops */
6554 /****************************************************************************
6555 * General service functions
6556 ****************************************************************************/
/* Program the device's PCI power-management state (D0/D3hot) through
 * the PM capability's PMCSR register, preserving the non-state bits.
 */
6558 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
6562 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
6566 pci_write_config_word(bp->pdev,
6567 bp->pm_cap + PCI_PM_CTRL,
6568 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
6569 PCI_PM_CTRL_PME_STATUS));
6571 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
6572 /* delay required during transition out of D3hot */
6577 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
/* Arm PME so the device can signal wake events from low power. */
6581 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
6583 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
6586 /* No more memory access after this point until
6587 * device is brought back to D0.
6598 * net_device service functions
6601 /* called with netif_tx_lock from set_multicast */
/* Configure RX filtering from dev->flags and the multicast list:
 * promiscuous, all-multi, or an explicit CAM-programmed multicast set.
 * Runs under netif_tx_lock (see comment above).
 */
6602 static void bnx2x_set_rx_mode(struct net_device *dev)
6604 struct bnx2x *bp = netdev_priv(dev);
6605 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6607 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
6609 if (dev->flags & IFF_PROMISC)
6610 rx_mode = BNX2X_RX_MODE_PROMISC;
/* Too many groups for the CAM -> fall back to all-multicast. */
6612 else if ((dev->flags & IFF_ALLMULTI) ||
6613 (dev->mc_count > BNX2X_MAX_MULTICAST))
6614 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6616 else { /* some multicasts */
6618 struct dev_mc_list *mclist;
6619 struct mac_configuration_cmd *config =
6620 bnx2x_sp(bp, mcast_config);
/* Fill one CAM entry per multicast address; MAC bytes are stored
 * as three byte-swapped 16-bit words.
 */
6622 for (i = 0, mclist = dev->mc_list;
6623 mclist && (i < dev->mc_count);
6624 i++, mclist = mclist->next) {
6626 config->config_table[i].cam_entry.msb_mac_addr =
6627 swab16(*(u16 *)&mclist->dmi_addr[0]);
6628 config->config_table[i].cam_entry.middle_mac_addr =
6629 swab16(*(u16 *)&mclist->dmi_addr[2]);
6630 config->config_table[i].cam_entry.lsb_mac_addr =
6631 swab16(*(u16 *)&mclist->dmi_addr[4]);
6632 config->config_table[i].cam_entry.flags =
6633 cpu_to_le16(bp->port);
6634 config->config_table[i].target_table_entry.flags = 0;
6635 config->config_table[i].target_table_entry.
6637 config->config_table[i].target_table_entry.
6641 "setting MCAST[%d] (%04x:%04x:%04x)\n",
6642 i, config->config_table[i].cam_entry.msb_mac_addr,
6643 config->config_table[i].cam_entry.middle_mac_addr,
6644 config->config_table[i].cam_entry.lsb_mac_addr);
/* Invalidate entries left over from a previously larger list. */
6646 old = config->hdr.length_6b;
6648 for (; i < old; i++) {
6649 if (CAM_IS_INVALID(config->config_table[i])) {
6650 i--; /* already invalidated */
6654 CAM_INVALIDATE(config->config_table[i]);
/* Per-port CAM window; emulation uses a smaller table. */
6658 if (CHIP_REV_IS_SLOW(bp))
6659 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
6661 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
6663 config->hdr.length_6b = i;
6664 config->hdr.offset = offset;
6665 config->hdr.reserved0 = 0;
6666 config->hdr.reserved1 = 0;
/* Post the CAM update to the slowpath via a SET_MAC ramrod. */
6668 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6669 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6670 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6673 bp->rx_mode = rx_mode;
6674 bnx2x_set_storm_rx_mode(bp);
/* NAPI poll handler for one fastpath ring: reap TX completions,
 * process up to `budget` RX packets, and re-enable interrupts only
 * when all work is done (budget not exhausted and no pending events).
 */
6677 static int bnx2x_poll(struct napi_struct *napi, int budget)
6679 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
6681 struct bnx2x *bp = fp->bp;
6684 #ifdef BNX2X_STOP_ON_ERROR
6685 if (unlikely(bp->panic))
/* Warm the cache lines we are about to touch. */
6689 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
6690 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
6691 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
6693 bnx2x_update_fpsb_idx(fp);
6695 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
6696 bnx2x_tx_int(fp, budget);
6699 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
6700 work_done = bnx2x_rx_int(fp, budget);
6703 rmb(); /* bnx2x_has_work() reads the status block */
6705 /* must not complete if we consumed full budget */
6706 if ((work_done < budget) && !bnx2x_has_work(fp)) {
6708 #ifdef BNX2X_STOP_ON_ERROR
6711 netif_rx_complete(bp->dev, napi);
/* ACK both storm indices; only the last ACK re-enables the IGU. */
6713 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
6714 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
6715 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
6716 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
6722 /* Called with netif_tx_lock.
6723 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
6724 * netif_wake_queue().
/* Hard-start-xmit: map the skb, build the chain of TX buffer
 * descriptors (start BD, optional parse BD for csum/TSO, fragment
 * BDs), and ring the doorbell. Runs under netif_tx_lock.
 */
6726 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
6728 struct bnx2x *bp = netdev_priv(dev);
6729 struct bnx2x_fastpath *fp;
6730 struct sw_tx_bd *tx_buf;
6731 struct eth_tx_bd *tx_bd;
6732 struct eth_tx_parse_bd *pbd = NULL;
6733 u16 pkt_prod, bd_prod;
6734 int nbd, fp_index = 0;
6737 #ifdef BNX2X_STOP_ON_ERROR
6738 if (unlikely(bp->panic))
6739 return NETDEV_TX_BUSY;
/* Spread traffic across queues by current CPU. */
6742 fp_index = smp_processor_id() % (bp->num_queues);
6744 fp = &bp->fp[fp_index];
/* NOTE(review): this checks availability on bp->fp (queue 0), not on
 * the selected `fp` — looks like a bug when num_queues > 1; confirm
 * against later upstream fixes.
 */
6745 if (unlikely(bnx2x_tx_avail(bp->fp) <
6746 (skb_shinfo(skb)->nr_frags + 3))) {
/* NOTE(review): trailing comma here is the comma operator — works,
 * but a semicolon was almost certainly intended.
 */
6747 bp->slowpath->eth_stats.driver_xoff++,
6748 netif_stop_queue(dev);
6749 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
6750 return NETDEV_TX_BUSY;
6754 This is a bit ugly. First we use one BD which we mark as start,
6755 then for TSO or xsum we have a parsing info BD,
6756 and only then we have the rest of the TSO bds.
6757 (don't forget to mark the last one as last,
6758 and to unmap only AFTER you write to the BD ...)
6759 I would like to thank DovH for this mess.
6762 pkt_prod = fp->tx_pkt_prod++;
6763 bd_prod = fp->tx_bd_prod;
6764 bd_prod = TX_BD(bd_prod);
6766 /* get a tx_buff and first bd */
6767 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
6768 tx_bd = &fp->tx_desc_ring[bd_prod];
6770 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
6771 tx_bd->general_data = (UNICAST_ADDRESS <<
6772 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
6773 tx_bd->general_data |= 1; /* header nbd */
6775 /* remember the first bd of the packet */
6776 tx_buf->first_bd = bd_prod;
6778 DP(NETIF_MSG_TX_QUEUED,
6779 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6780 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
/* Checksum offload requested: build the parse BD with header
 * lengths so the hardware can compute the TCP/UDP checksum.
 */
6782 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6783 struct iphdr *iph = ip_hdr(skb);
6786 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
6788 /* turn on parsing and get a bd */
6789 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6790 pbd = (void *)&fp->tx_desc_ring[bd_prod];
/* Header lengths in the parse BD are in 16-bit words. */
6791 len = ((u8 *)iph - (u8 *)skb->data) / 2;
6793 /* for now NS flag is not used in Linux */
6794 pbd->global_data = (len |
6795 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
6796 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
6797 pbd->ip_hlen = ip_hdrlen(skb) / 2;
6798 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
6799 if (iph->protocol == IPPROTO_TCP) {
6800 struct tcphdr *th = tcp_hdr(skb);
6802 tx_bd->bd_flags.as_bitfield |=
6803 ETH_TX_BD_FLAGS_TCP_CSUM;
6804 pbd->tcp_flags = pbd_tcp_flags(skb);
6805 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
6806 pbd->tcp_pseudo_csum = swab16(th->check);
6808 } else if (iph->protocol == IPPROTO_UDP) {
6809 struct udphdr *uh = udp_hdr(skb);
6811 tx_bd->bd_flags.as_bitfield |=
6812 ETH_TX_BD_FLAGS_TCP_CSUM;
6813 pbd->total_hlen += cpu_to_le16(4);
6814 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
6815 pbd->cs_offset = 5; /* 10 >> 1 */
6816 pbd->tcp_pseudo_csum = 0;
6817 /* HW bug: we need to subtract 10 bytes before the
6818 * UDP header from the csum
6820 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
6821 csum_partial(((u8 *)(uh)-10), 10, 0)));
/* VLAN tag goes in the BD when accel is on; otherwise the field
 * carries the packet producer index (debug aid, ignored by HW).
 */
6825 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
6826 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
6827 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
6829 tx_bd->vlan = cpu_to_le16(pkt_prod);
/* DMA-map the linear part and fill in the first BD's address. */
6832 mapping = pci_map_single(bp->pdev, skb->data,
6833 skb->len, PCI_DMA_TODEVICE);
6835 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6836 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
/* BD count: fragments + start BD (+ parse BD if present). */
6837 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
6838 tx_bd->nbd = cpu_to_le16(nbd);
6839 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
6841 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
6842 " nbytes %d flags %x vlan %u\n",
6843 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
6844 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
/* TSO path: split headers from payload if they share the first BD
 * and fill the parse BD with MSS/seq/pseudo-checksum.
 */
6846 if (skb_shinfo(skb)->gso_size &&
6847 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
6848 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
6850 DP(NETIF_MSG_TX_QUEUED,
6851 "TSO packet len %d hlen %d total len %d tso size %d\n",
6852 skb->len, hlen, skb_headlen(skb),
6853 skb_shinfo(skb)->gso_size);
6855 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
6857 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
6858 /* we split the first bd into headers and data bds
6859 * to ease the pain of our fellow micocode engineers
6860 * we use one mapping for both bds
6861 * So far this has only been observed to happen
6862 * in Other Operating Systems(TM)
6865 /* first fix first bd */
6867 tx_bd->nbd = cpu_to_le16(nbd);
6868 tx_bd->nbytes = cpu_to_le16(hlen);
6870 /* we only print this as an error
6871 * because we don't think this will ever happen.
6873 BNX2X_ERR("TSO split header size is %d (%x:%x)"
6874 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
6875 tx_bd->addr_lo, tx_bd->nbd);
6877 /* now get a new data bd
6878 * (after the pbd) and fill it */
6879 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6880 tx_bd = &fp->tx_desc_ring[bd_prod];
/* Data BD reuses the same DMA mapping, offset past headers. */
6882 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6883 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
6884 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
6885 tx_bd->vlan = cpu_to_le16(pkt_prod);
6886 /* this marks the bd
6887 * as one that has no individual mapping
6888 * the FW ignores this flag in a bd not marked start
6890 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
6891 DP(NETIF_MSG_TX_QUEUED,
6892 "TSO split data size is %d (%x:%x)\n",
6893 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
6897 /* supposed to be unreached
6898 * (and therefore not handled properly...)
6900 BNX2X_ERR("LSO with no PBD\n");
6904 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
6905 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
6906 pbd->ip_id = swab16(ip_hdr(skb)->id);
6907 pbd->tcp_pseudo_csum =
6908 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
6910 0, IPPROTO_TCP, 0));
6911 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
/* One BD per page fragment, each with its own DMA mapping. */
6917 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6918 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6920 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6921 tx_bd = &fp->tx_desc_ring[bd_prod];
6923 mapping = pci_map_page(bp->pdev, frag->page,
6925 frag->size, PCI_DMA_TODEVICE);
6927 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6928 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6929 tx_bd->nbytes = cpu_to_le16(frag->size);
6930 tx_bd->vlan = cpu_to_le16(pkt_prod);
6931 tx_bd->bd_flags.as_bitfield = 0;
6932 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
6933 " addr (%x:%x) nbytes %d flags %x\n",
6934 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
6935 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
6939 /* now at last mark the bd as the last bd */
6940 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
6942 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
6943 tx_bd, tx_bd->bd_flags.as_bitfield);
6947 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6949 /* now send a tx doorbell, counting the next bd
6950 * if the packet contains or ends with it
6952 if (TX_BD_POFF(bd_prod) < nbd)
6956 DP(NETIF_MSG_TX_QUEUED,
6957 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
6958 " tcp_flags %x xsum %x seq %u hlen %u\n",
6959 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
6960 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
6961 pbd->tcp_send_seq, pbd->total_hlen);
6963 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
/* Publish producer values to the chip; FW requires bds_prod to be
 * written before packets_prod (hence the mb()), then ring doorbell.
 */
6965 fp->hw_tx_prods->bds_prod =
6966 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
6967 mb(); /* FW restriction: must not reorder writing nbd and packets */
6968 fp->hw_tx_prods->packets_prod =
6969 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
6970 DOORBELL(bp, fp_index, 0);
6974 fp->tx_bd_prod = bd_prod;
6975 dev->trans_start = jiffies;
/* Stop the queue when nearly full; re-check after stopping to close
 * the race with a concurrent bnx2x_tx_int() freeing descriptors.
 */
6977 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
6978 netif_stop_queue(dev);
6979 bp->slowpath->eth_stats.driver_xoff++;
6980 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
6981 netif_wake_queue(dev);
6985 return NETDEV_TX_OK;
6988 /* Called with rtnl_lock */
/* net_device .open (rtnl held): power the device up and load the NIC. */
6989 static int bnx2x_open(struct net_device *dev)
6991 struct bnx2x *bp = netdev_priv(dev);
6993 bnx2x_set_power_state(bp, PCI_D0);
6995 return bnx2x_nic_load(bp, 1);
6998 /* Called with rtnl_lock */
/* net_device .stop (rtnl held): unload the NIC and drop to D3hot,
 * except on emulation/FPGA (slow) chips.
 */
6999 static int bnx2x_close(struct net_device *dev)
7001 struct bnx2x *bp = netdev_priv(dev);
7003 /* Unload the driver, release IRQs */
7004 bnx2x_nic_unload(bp, 1);
7006 if (!CHIP_REV_IS_SLOW(bp))
7007 bnx2x_set_power_state(bp, PCI_D3hot);
7012 /* Called with rtnl_lock */
/* net_device .set_mac_address (rtnl held): validate and store the new
 * MAC, and program it into the hardware if the interface is up.
 */
7013 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
7015 struct sockaddr *addr = p;
7016 struct bnx2x *bp = netdev_priv(dev);
7018 if (!is_valid_ether_addr(addr->sa_data))
7021 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7022 if (netif_running(dev))
7023 bnx2x_set_mac_addr(bp);
7028 /* called with rtnl_lock */
/* net_device .do_ioctl (rtnl held): MII register access via clause-45
 * reads/writes, serialized by phy_mutex. Writes require CAP_NET_ADMIN.
 */
7029 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7031 struct mii_ioctl_data *data = if_mii(ifr);
7032 struct bnx2x *bp = netdev_priv(dev);
7037 data->phy_id = bp->phy_addr;
/* MII register read path. */
7044 if (!netif_running(dev))
7047 mutex_lock(&bp->phy_mutex);
7048 err = bnx2x_cl45_read(bp, bp->port, 0, bp->phy_addr,
7049 DEFAULT_PHY_DEV_ADDR,
7050 (data->reg_num & 0x1f), &mii_regval);
7051 data->val_out = mii_regval;
7052 mutex_unlock(&bp->phy_mutex);
/* MII register write path (privileged). */
7057 if (!capable(CAP_NET_ADMIN))
7060 if (!netif_running(dev))
7063 mutex_lock(&bp->phy_mutex);
7064 err = bnx2x_cl45_write(bp, bp->port, 0, bp->phy_addr,
7065 DEFAULT_PHY_DEV_ADDR,
7066 (data->reg_num & 0x1f), data->val_in);
7067 mutex_unlock(&bp->phy_mutex);
7078 /* Called with rtnl_lock */
/* net_device .change_mtu (rtnl held): bounds-check the new MTU and
 * bounce the NIC so buffers are re-sized during load.
 */
7079 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
7081 struct bnx2x *bp = netdev_priv(dev);
7083 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
7084 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
7087 /* This does not race with packet allocation
7088 * because the actual alloc size is
7089 * only updated as part of load
7093 if (netif_running(dev)) {
7094 bnx2x_nic_unload(bp, 0);
7095 bnx2x_nic_load(bp, 0);
/* net_device .tx_timeout: defer recovery to the reset workqueue task
 * (skipped under BNX2X_STOP_ON_ERROR to preserve debug state).
 */
7100 static void bnx2x_tx_timeout(struct net_device *dev)
7102 struct bnx2x *bp = netdev_priv(dev);
7104 #ifdef BNX2X_STOP_ON_ERROR
7108 /* This allows the netif to be shutdown gracefully before resetting */
7109 schedule_work(&bp->reset_task);
7113 /* Called with rtnl_lock */
/* VLAN acceleration registration (rtnl held): remember the group and
 * refresh the client config if the device is running.
 */
7114 static void bnx2x_vlan_rx_register(struct net_device *dev,
7115 struct vlan_group *vlgrp)
7117 struct bnx2x *bp = netdev_priv(dev);
7120 if (netif_running(dev))
7121 bnx2x_set_client_config(bp);
7125 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: invoke the interrupt handler with the device
 * IRQ masked (for netconsole and friends).
 */
7126 static void poll_bnx2x(struct net_device *dev)
7128 struct bnx2x *bp = netdev_priv(dev);
7130 disable_irq(bp->pdev->irq);
7131 bnx2x_interrupt(bp->pdev->irq, dev);
7132 enable_irq(bp->pdev->irq);
/* Deferred reset worker (scheduled from tx_timeout): if the device is
 * still running and in OPEN state, recover by unload + reload.
 */
7136 static void bnx2x_reset_task(struct work_struct *work)
7138 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7140 #ifdef BNX2X_STOP_ON_ERROR
7141 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7142 " so reset not done to allow debug dump,\n"
7143 KERN_ERR " you will need to reboot when done\n");
7147 if (!netif_running(bp->dev))
7152 if (bp->state != BNX2X_STATE_OPEN) {
7153 DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
7154 goto reset_task_exit;
7157 bnx2x_nic_unload(bp, 0);
7158 bnx2x_nic_load(bp, 0);
/* One-time board bring-up during probe: enable the PCI device, claim
 * BARs, discover PM/PCIe capabilities, configure DMA masks, map the
 * register and doorbell BARs, and seed default driver parameters.
 * Cleans up via the err_out_* labels on failure.
 */
7164 static int __devinit bnx2x_init_board(struct pci_dev *pdev,
7165 struct net_device *dev)
7170 SET_NETDEV_DEV(dev, &pdev->dev);
7171 bp = netdev_priv(dev);
/* Port index is derived from the PCI function number. */
7174 bp->port = PCI_FUNC(pdev->devfn);
7176 rc = pci_enable_device(pdev);
7178 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
/* BAR0 = register space, BAR2 = doorbells; both must be MMIO. */
7182 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7183 printk(KERN_ERR PFX "Cannot find PCI device base address,"
7186 goto err_out_disable;
7189 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7190 printk(KERN_ERR PFX "Cannot find second PCI device"
7191 " base address, aborting\n");
7193 goto err_out_disable;
7196 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7198 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
7200 goto err_out_disable;
7203 pci_set_master(pdev);
7205 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7206 if (bp->pm_cap == 0) {
7207 printk(KERN_ERR PFX "Cannot find power management"
7208 " capability, aborting\n");
7210 goto err_out_release;
7213 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7214 if (bp->pcie_cap == 0) {
7215 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
7218 goto err_out_release;
/* Prefer 64-bit DMA; fall back to 32-bit, else fail. */
7221 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
7222 bp->flags |= USING_DAC_FLAG;
7223 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
7224 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
7225 " failed, aborting\n");
7227 goto err_out_release;
7230 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
7231 printk(KERN_ERR PFX "System does not support DMA,"
7234 goto err_out_release;
7240 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7241 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7243 dev->base_addr = pci_resource_start(pdev, 0);
7245 dev->irq = pdev->irq;
7247 bp->regview = ioremap_nocache(dev->base_addr,
7248 pci_resource_len(pdev, 0));
7250 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
7252 goto err_out_release;
7255 bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
7256 pci_resource_len(pdev, 2));
7257 if (!bp->doorbells) {
7258 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
7263 bnx2x_set_power_state(bp, PCI_D0);
7265 bnx2x_get_hwinfo(bp);
/* Emulation/FPGA and no-MCP setups only support a single function. */
7267 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
7268 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
7269 " will only init first device\n");
7275 printk(KERN_ERR PFX "MCP disabled, will only"
7276 " init first device\n");
7280 if (onefunc && bp->port) {
7281 printk(KERN_ERR PFX "Second device disabled, exiting\n");
/* Default ring sizes and interrupt-coalescing parameters. */
7286 bp->tx_ring_size = MAX_TX_AVAIL;
7287 bp->rx_ring_size = MAX_RX_AVAIL;
7293 bp->tx_quick_cons_trip_int = 0xff;
7294 bp->tx_quick_cons_trip = 0xff;
7295 bp->tx_ticks_int = 50;
7298 bp->rx_quick_cons_trip_int = 0xff;
7299 bp->rx_quick_cons_trip = 0xff;
7300 bp->rx_ticks_int = 25;
7303 bp->stats_ticks = 1000000 & 0xffff00;
/* Periodic driver timer; `poll` module param overrides the period. */
7305 bp->timer_interval = HZ;
7306 bp->current_interval = (poll ? poll : HZ);
7308 init_timer(&bp->timer);
7309 bp->timer.expires = jiffies + bp->current_interval;
7310 bp->timer.data = (unsigned long) bp;
7311 bp->timer.function = bnx2x_timer;
/* Error unwind: unmap BARs, release regions, disable the device. */
7317 iounmap(bp->regview);
7321 if (bp->doorbells) {
7322 iounmap(bp->doorbells);
7323 bp->doorbells = NULL;
7327 pci_release_regions(pdev);
7330 pci_disable_device(pdev);
7331 pci_set_drvdata(pdev, NULL);
/* Read the negotiated PCIe link width (number of lanes) from the
 * link-control register.
 */
7337 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
7339 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7341 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7345 /* return value of 1=2.5GHz 2=5GHz */
/* Read the negotiated PCIe link speed; see comment above (1 = 2.5GHz,
 * 2 = 5GHz).
 */
7346 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
7348 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7350 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
/* PCI probe entry point: allocate the netdev, initialize the board,
 * wire up net_device ops / ethtool ops / feature flags, register the
 * netdev, and print a one-line summary of the adapter.
 */
7354 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7355 const struct pci_device_id *ent)
7357 static int version_printed;
7358 struct net_device *dev = NULL;
7361 int port = PCI_FUNC(pdev->devfn);
7362 DECLARE_MAC_BUF(mac);
/* Print the driver banner once, on the first probed device. */
7364 if (version_printed++ == 0)
7365 printk(KERN_INFO "%s", version);
7367 /* dev zeroed in init_etherdev */
7368 dev = alloc_etherdev(sizeof(*bp));
7372 netif_carrier_off(dev);
7374 bp = netdev_priv(dev);
7375 bp->msglevel = debug;
/* `onefunc` module param limits init to PCI function 0. */
7377 if (port && onefunc) {
7378 printk(KERN_ERR PFX "second function disabled. exiting\n");
7383 rc = bnx2x_init_board(pdev, dev);
/* Hook up net_device operations and ethtool. */
7389 dev->hard_start_xmit = bnx2x_start_xmit;
7390 dev->watchdog_timeo = TX_TIMEOUT;
7392 dev->ethtool_ops = &bnx2x_ethtool_ops;
7393 dev->open = bnx2x_open;
7394 dev->stop = bnx2x_close;
7395 dev->set_multicast_list = bnx2x_set_rx_mode;
7396 dev->set_mac_address = bnx2x_change_mac_addr;
7397 dev->do_ioctl = bnx2x_ioctl;
7398 dev->change_mtu = bnx2x_change_mtu;
7399 dev->tx_timeout = bnx2x_tx_timeout;
7401 dev->vlan_rx_register = bnx2x_vlan_rx_register;
7403 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7404 dev->poll_controller = poll_bnx2x;
/* Offload features: SG, checksum, VLAN accel, TSO; HIGHDMA only when
 * the 64-bit DMA mask was accepted (USING_DAC_FLAG).
 */
7406 dev->features |= NETIF_F_SG;
7407 if (bp->flags & USING_DAC_FLAG)
7408 dev->features |= NETIF_F_HIGHDMA;
7409 dev->features |= NETIF_F_IP_CSUM;
7411 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7413 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7415 rc = register_netdev(dev);
7417 dev_err(&pdev->dev, "Cannot register net device\n");
/* Registration failed: undo everything init_board set up. */
7419 iounmap(bp->regview);
7421 iounmap(bp->doorbells);
7422 pci_release_regions(pdev);
7423 pci_disable_device(pdev);
7424 pci_set_drvdata(pdev, NULL);
7429 pci_set_drvdata(pdev, dev);
7431 bp->name = board_info[ent->driver_data].name;
7432 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
7433 " IRQ %d, ", dev->name, bp->name,
7434 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7435 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7436 bnx2x_get_pcie_width(bp),
7437 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
7438 dev->base_addr, bp->pdev->irq);
7439 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
/* PCI remove entry point: unregister the netdev and release all PCI
 * resources. Tolerates a NULL drvdata left by a failed probe.
 */
7443 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7445 struct net_device *dev = pci_get_drvdata(pdev);
7449 /* we get here if init_one() fails */
7450 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
7454 bp = netdev_priv(dev);
7456 unregister_netdev(dev);
7459 iounmap(bp->regview);
7462 iounmap(bp->doorbells);
7465 pci_release_regions(pdev);
7466 pci_disable_device(pdev);
7467 pci_set_drvdata(pdev, NULL);
/* PM suspend: unload the NIC, detach the netdev, save PCI config
 * space, and drop to the power state chosen for the suspend message.
 */
7470 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
7472 struct net_device *dev = pci_get_drvdata(pdev);
7478 if (!netif_running(dev))
7481 bp = netdev_priv(dev);
7483 bnx2x_nic_unload(bp, 0);
7485 netif_device_detach(dev);
7487 pci_save_state(pdev);
7488 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
/* PM resume: restore PCI config space, return to D0, re-attach the
 * netdev, and reload the NIC.
 */
7493 static int bnx2x_resume(struct pci_dev *pdev)
7495 struct net_device *dev = pci_get_drvdata(pdev);
7500 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
7504 if (!netif_running(dev))
7507 bp = netdev_priv(dev);
7509 pci_restore_state(pdev);
7510 bnx2x_set_power_state(bp, PCI_D0);
7511 netif_device_attach(dev);
7513 rc = bnx2x_nic_load(bp, 0);
/* PCI driver registration table: probe/remove and PM callbacks. */
7520 static struct pci_driver bnx2x_pci_driver = {
7521 .name = DRV_MODULE_NAME,
7522 .id_table = bnx2x_pci_tbl,
7523 .probe = bnx2x_init_one,
7524 .remove = __devexit_p(bnx2x_remove_one),
7525 .suspend = bnx2x_suspend,
7526 .resume = bnx2x_resume,
/* Module init: register the PCI driver. */
7529 static int __init bnx2x_init(void)
7531 return pci_register_driver(&bnx2x_pci_driver);
/* Module exit: unregister the PCI driver. */
7534 static void __exit bnx2x_cleanup(void)
7536 pci_unregister_driver(&bnx2x_pci_driver);
/* Register the module entry/exit points with the kernel. */
7539 module_init(bnx2x_init);
7540 module_exit(bnx2x_cleanup);