/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
        #define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.42.4"
#define DRV_MODULE_RELDATE      "2008/4/9"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int use_inta;
static int poll;
static int onefunc;
static int nomcp;
static int debug;
static int use_multi;

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

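/*
 * Indirect GRC access tunnels register cycles through the PCI config
 * window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA).  The window is pointed
 * back at PCICFG_VENDOR_ID_OFFSET when done, presumably so that later
 * ordinary config reads do not hit a stale GRC address.
 */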
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

#ifdef BNX2X_IND_RD
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
#endif

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

/*              DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

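/*
 * Illustrative use only (the buffer name is assumed, not a fixed API):
 * after mapping a host buffer with pci_map_single(), a caller can DMA
 * len32 dwords of it into chip internal memory at dst_addr with
 *
 *      bnx2x_write_dmae(bp, mapping, dst_addr, len32);
 *
 * The routine then polls the wb_comp slowpath word until the DMAE
 * engine writes BNX2X_WB_COMP_VAL back, or the timeout expires.
 */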
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
                             u32 dst_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/
/*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        /* adjust timeout for emulation/FPGA */
        if (CHIP_REV_IS_SLOW(bp))
                timeout *= 100;
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
/*              DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->dmae;
        int port = bp->port;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int timeout = 200;

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = BNX2X_WB_COMP_VAL;

/*
        DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
*/

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, port * 8);

        udelay(5);
        while (*wb_comp != BNX2X_WB_COMP_VAL) {
                udelay(5);
                if (!timeout) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                timeout--;
        }
/*
        DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
*/
}

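/*
 * Scan the assert lists of the four storm processors (X, T, C and U,
 * matching the intmem_base[] order) and log every valid entry; returns
 * the number of asserts found.
 */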
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        int i, j, rc = 0;
        char last_idx;
        const char storm[] = {"XTCU"};
        const u32 intmem_base[] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };

        /* Go through all instances of all SEMIs */
        for (i = 0; i < 4; i++) {
                last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
                                   intmem_base[i]);
                if (last_idx)
                        BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
                                  storm[i], last_idx);

                /* print the asserts */
                for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
                        u32 row0, row1, row2, row3;

                        row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
                                      intmem_base[i]);
                        row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
                                      intmem_base[i]);
                        row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
                                      intmem_base[i]);
                        row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
                                      intmem_base[i]);

                        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
                                          " 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storm[i], j, row3, row2, row1, row0);
                                rc++;
                        } else {
                                break;
                        }
                }
        }
        return rc;
}

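/*
 * Dump the MCP printf buffer from its scratchpad.  The buffer appears
 * to be cyclic: 'mark' is read from the chip, then the text is printed
 * from mark to the end of the buffer and from the start back up to
 * mark.
 */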
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)"
                          "  *rx_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  fp_c_idx(%x)  fp_u_idx(%x)"
                          "  bd data(%x,%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
                          fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
                          fp->fp_u_idx, hw_prods->packets_prod,
                          hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[0], rx_bd[1], sw_bd->skb);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");

        bp->stats_state = STATS_STATE_DISABLE;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                /* Errata A0.158 workaround */
                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = bp->port;
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path code */

/*
 * general service functions
 */

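/*
 * Ack a status block to the IGU: 'index' is the new status block index
 * being acknowledged, 'update' selects whether the index is actually
 * updated, and 'op' selects whether the interrupt is left enabled or
 * disabled afterwards (e.g. IGU_INT_DISABLE in the MSI-X fastpath
 * handler below).
 */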
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

/*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
        REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

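/*
 * The last entry of each RCQ page holds a "next page" pointer rather
 * than a real CQE, which is presumably why a consumer index that lands
 * on it (the MAX_RCQ_DESC_CNT test) is bumped past it before comparing.
 */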
static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;

        if ((rx_cons_sb != fp->rx_comp_cons) ||
            (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
                return 1;

        return 0;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
        u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

/*      DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
        if (result == 0) {
                BNX2X_ERR("read %x from IGU\n", result);
                REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
        }
#endif
        return result;
}

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = tx_buf->first_bd;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("bad nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        BUG_TRAP(skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return bd_idx;
}

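/*
 * Ring-accounting sketch: each TX page ends in a "next page" BD, so of
 * the NUM_TX_BD total descriptors only NUM_TX_BD - NUM_TX_RINGS can
 * carry data.  The cons/TX_DESC_CNT and prod/TX_DESC_CNT terms below
 * fold those per-page link entries out of the prod - cons distance.
 */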
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        u16 used;
        u32 prod;
        u32 cons;

        /* Tell compiler that prod and cons can change */
        barrier();
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
                (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

        if (prod >= cons) {
                /* used = prod - cons - prod/size + cons/size */
                used -= NUM_TX_BD - NUM_TX_RINGS;
        }

        BUG_TRAP(used <= fp->bp->tx_ring_size);
        BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

        return (fp->bp->tx_ring_size - used);
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %d\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(NETIF_MSG_RX_STATUS,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply(%d)  state is %x\n",
                                  command, fp->state);
                }
                mb(); /* force bnx2x_wait_ramrod to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
                   cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected ramrod (%d)  state is %x\n",
                          command, bp->state);
        }

        mb(); /* force bnx2x_wait_ramrod to see the change */
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           fp->index, hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                unsigned int len, pad;
                struct sw_rx_bd *rx_buf;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];

                DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u  sw_comp_cons %u"
                   "  comp_ring (%u)  bd_ring (%u,%u)\n",
                   hw_comp_cons, sw_comp_cons,
                   comp_ring_cons, bd_prod, bd_cons);
                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %x\n",
                   cqe->fast_path_cqe.type,
                   cqe->fast_path_cqe.error_type_flags,
                   cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

                /* is this a slowpath msg? */
                if (unlikely(cqe->fast_path_cqe.type)) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;

                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        pci_dma_sync_single_for_device(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                       pad + RX_COPY_THRESH,
                                                       PCI_DMA_FROMDEVICE);
                        prefetch(skb);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe->fast_path_cqe.error_type_flags &
                                                        ETH_RX_ERROR_FALGS)) {
                        /* do we sometimes forward error packets anyway? */
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR flags(%u) Rx packet(%u)\n",
                                   cqe->fast_path_cqe.error_type_flags,
                                   sw_comp_cons);
                                /* TBD make sure MC counts this as a drop */
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR packet dropped "
                                           "because of alloc failure\n");
                                        /* TBD count this as a drop? */
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                    new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
                                pci_unmap_single(bp->pdev,
                                        pci_unmap_addr(rx_buf, mapping),
                                                 bp->rx_buf_use_size,
                                                 PCI_DMA_FROMDEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR packet dropped because "
                                   "of alloc failure\n");
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;

                        /* TBD do we pass bad csum packets in promisc */
                }

#ifdef BCM_VLAN
                if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
                                & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
                    && (bp->vlgrp != NULL))
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
                else
#endif
                netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;

next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
                rx_pkt++;

                if ((rx_pkt == budget))
                        break;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

        mmiowb(); /* keep prod updates ordered */

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;
        struct net_device *dev = bp->dev;
        int index = fp->index;

        DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
        bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        prefetch(&fp->status_blk->u_status_block.status_block_index);

        netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
        return IRQ_HANDLED;
}

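/*
 * INT#A/MSI handler: here bnx2x_ack_int() returns a status bitmask
 * rather than a single index; judging by the tests below, bit 1 means
 * fastpath queue 0 has work and bit 0 means the default (slowpath)
 * status block needs service.
 */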
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);
        u16 status = bnx2x_ack_int(bp);

        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }

        DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Return here if interrupt is shared and is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        if (status & 0x2) {
                struct bnx2x_fastpath *fp = &bp->fp[0];

                prefetch(fp->rx_cons_sb);
                prefetch(fp->tx_cons_sb);
                prefetch(&fp->status_blk->c_status_block.status_block_index);
                prefetch(&fp->status_blk->u_status_block.status_block_index);

                netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

                status &= ~0x2;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status & 0x1)) {

                schedule_work(&bp->sp_task);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
           status);

        return IRQ_HANDLED;
}

/* end of fast path */

/* Link */

/*
 * General service functions
 */

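/*
 * The MISC driver-control registers implement a set-on-write lock:
 * writing the resource bit to the "set" address (base + 4) attempts
 * the claim and reading the base address back confirms ownership.
 * The claim is retried for up to one second before giving up.
 */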
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 port = bp->port;
        int cnt;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 1 second every 5ms */
        for (cnt = 0; cnt < 200; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
                       resource_bit);
                lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        u8 port = bp->port;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
        return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
        u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

        mutex_lock(&bp->phy_mutex);

        if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
            (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
                bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
        u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

        if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
            (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
                bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

        mutex_unlock(&bp->phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}

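/*
 * Map the negotiated IEEE pause mode back onto the ethtool advertising
 * bits (ADVERTISED_Pause/ADVERTISED_Asym_Pause) so user-visible
 * reporting matches what the link code actually advertised.
 */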
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
        switch (bp->link_vars.ieee_fc) {
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
                bp->advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
                bp->advertising |= (ADVERTISED_Asym_Pause |
                                         ADVERTISED_Pause);
                break;
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
                bp->advertising |= ADVERTISED_Asym_Pause;
                break;
        default:
                bp->advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;
        }
}

static void bnx2x_link_report(struct bnx2x *bp)
{
        if (bp->link_vars.link_up) {
                if (bp->state == BNX2X_STATE_OPEN)
                        netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

                printk("%d Mbps ", bp->link_vars.line_speed);

                if (bp->link_vars.duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
                        if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        } else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");

        } else { /* link_down */
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
        }
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
        u8 rc;

        /* Initialize link parameters structure variables */
        bp->link_params.mtu = bp->dev->mtu;

        bnx2x_phy_hw_lock(bp);
        rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
        bnx2x_phy_hw_unlock(bp);

        if (bp->link_vars.link_up)
                bnx2x_link_report(bp);

        bnx2x_calc_fc_adv(bp);
        return rc;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
        bnx2x_phy_hw_lock(bp);
        bnx2x_phy_init(&bp->link_params, &bp->link_vars);
        bnx2x_phy_hw_unlock(bp);

        bnx2x_calc_fc_adv(bp);
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
        bnx2x_phy_hw_lock(bp);
        bnx2x_link_reset(&bp->link_params, &bp->link_vars);
        bnx2x_phy_hw_unlock(bp);
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
        u8 rc;

        bnx2x_phy_hw_lock(bp);
        rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
        bnx2x_phy_hw_unlock(bp);

        return rc;
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
        bnx2x_phy_hw_lock(bp);
        bnx2x_link_update(&bp->link_params, &bp->link_vars);
        bnx2x_phy_hw_unlock(bp);

        /* indicate link status */
        bnx2x_link_report(bp);
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
        if (bp->state != BNX2X_STATE_OPEN)
                return;

        bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

        /* indicate link status */
        bnx2x_link_report(bp);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

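/*
 * Slow path queue (SPQ) sketch: ramrod commands are posted as BDs on a
 * host-memory ring, the new producer index is written to XSTORM
 * internal memory, and the completions come back as slowpath CQEs on
 * the fastpath completion ring (see bnx2x_sp_event() above).
 */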
1458 /* the slow path queue is odd since completions arrive on the fastpath ring */
1459 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1460                          u32 data_hi, u32 data_lo, int common)
1461 {
1462         int port = bp->port;
1463
1464         DP(NETIF_MSG_TIMER,
1465            "spe (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
1466            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
1467            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1468            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1469
1470 #ifdef BNX2X_STOP_ON_ERROR
1471         if (unlikely(bp->panic))
1472                 return -EIO;
1473 #endif
1474
1475         spin_lock(&bp->spq_lock);
1476
1477         if (!bp->spq_left) {
1478                 BNX2X_ERR("BUG! SPQ ring full!\n");
1479                 spin_unlock(&bp->spq_lock);
1480                 bnx2x_panic();
1481                 return -EBUSY;
1482         }
1483
1484         /* CID needs port number to be encoded in it */
1485         bp->spq_prod_bd->hdr.conn_and_cmd_data =
1486                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
1487                                      HW_CID(bp, cid)));
1488         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1489         if (common)
1490                 bp->spq_prod_bd->hdr.type |=
1491                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1492
1493         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1494         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1495
1496         bp->spq_left--;
1497
1498         if (bp->spq_prod_bd == bp->spq_last_bd) {
1499                 bp->spq_prod_bd = bp->spq;
1500                 bp->spq_prod_idx = 0;
1501                 DP(NETIF_MSG_TIMER, "end of spq\n");
1502
1503         } else {
1504                 bp->spq_prod_bd++;
1505                 bp->spq_prod_idx++;
1506         }
1507
1508         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
1509                bp->spq_prod_idx);
1510
1511         spin_unlock(&bp->spq_lock);
1512         return 0;
1513 }
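
/* Editor's note: a minimal usage sketch of bnx2x_sp_post(), kept out of
 * the build with #if 0. It mirrors the statistics-query ramrod issued
 * further down in bnx2x_update_stats(); the cid/data values here are
 * illustrative only.
 */
#if 0
static void bnx2x_sp_post_example(struct bnx2x *bp)
{
        /* post a statistics-query ramrod on connection 0, no data,
           not a "common" ramrod */
        if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0))
                BNX2X_ERR("failed to post stats ramrod\n");
}
#endif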
1514
1515 /* acquire split MCP access lock register */
1516 static int bnx2x_lock_alr(struct bnx2x *bp)
1517 {
1518         int rc = 0;
1519         u32 i, j, val;
1520
1521         might_sleep();
1522         i = 100;
1523         for (j = 0; j < i*10; j++) {
1524                 val = (1UL << 31);
1525                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1526                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1527                 if (val & (1UL << 31))
1528                         break;
1529
1530                 msleep(5);
1531         }
1532
1533         if (!(val & (1UL << 31))) {
1534                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1535
1536                 rc = -EBUSY;
1537         }
1538
1539         return rc;
1540 }
1541
1542 /* Release split MCP access lock register */
1543 static void bnx2x_unlock_alr(struct bnx2x *bp)
1544 {
1545         u32 val = 0;
1546
1547         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1548 }
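
/* Editor's note: bnx2x_lock_alr()/bnx2x_unlock_alr() must be strictly
 * paired and may sleep (msleep in the retry loop, hence the might_sleep),
 * so they are only safe in process context; see
 * bnx2x_attn_int_deasserted() below for the one caller in this file.
 */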
1549
1550 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1551 {
1552         struct host_def_status_block *def_sb = bp->def_status_blk;
1553         u16 rc = 0;
1554
1555         barrier(); /* status block is written to by the chip */
1556
1557         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1558                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1559                 rc |= 1;
1560         }
1561         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1562                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1563                 rc |= 2;
1564         }
1565         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1566                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1567                 rc |= 4;
1568         }
1569         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1570                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1571                 rc |= 8;
1572         }
1573         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1574                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1575                 rc |= 16;
1576         }
1577         return rc;
1578 }
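
/* Editor's note: the bitmask returned above decodes as
 *   bit 0 - attention bits index changed
 *   bit 1 - CSTORM index changed
 *   bit 2 - USTORM index changed
 *   bit 3 - XSTORM index changed
 *   bit 4 - TSTORM index changed
 * bnx2x_sp_task() below only dispatches on bits 0 and 1.
 */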
1579
1580 /*
1581  * slow path service functions
1582  */
1583
1584 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1585 {
1586         int port = bp->port;
1587         u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
1588         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1589                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
1590         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1591                                        NIG_REG_MASK_INTERRUPT_PORT0;
1592
1593         if (~bp->aeu_mask & (asserted & 0xff))
1594                 BNX2X_ERR("IGU ERROR: asserted bits are not masked\n");
1595         if (bp->attn_state & asserted)
1596                 BNX2X_ERR("IGU ERROR: attention bits already asserted\n");
1597
1598         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
1599            bp->aeu_mask, asserted);
1600         bp->aeu_mask &= ~(asserted & 0xff);
1601         DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
1602
1603         REG_WR(bp, aeu_addr, bp->aeu_mask);
1604
1605         bp->attn_state |= asserted;
1606
1607         if (asserted & ATTN_HARD_WIRED_MASK) {
1608                 if (asserted & ATTN_NIG_FOR_FUNC) {
1609
1610                         /* save nig interrupt mask */
1611                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
1612                         REG_WR(bp, nig_int_mask_addr, 0);
1613
1614                         bnx2x_link_attn(bp);
1615
1616                         /* handle unicore attn? */
1617                 }
1618                 if (asserted & ATTN_SW_TIMER_4_FUNC)
1619                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1620
1621                 if (asserted & GPIO_2_FUNC)
1622                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1623
1624                 if (asserted & GPIO_3_FUNC)
1625                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1626
1627                 if (asserted & GPIO_4_FUNC)
1628                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1629
1630                 if (port == 0) {
1631                         if (asserted & ATTN_GENERAL_ATTN_1) {
1632                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1633                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1634                         }
1635                         if (asserted & ATTN_GENERAL_ATTN_2) {
1636                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1637                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1638                         }
1639                         if (asserted & ATTN_GENERAL_ATTN_3) {
1640                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1641                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1642                         }
1643                 } else {
1644                         if (asserted & ATTN_GENERAL_ATTN_4) {
1645                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1646                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1647                         }
1648                         if (asserted & ATTN_GENERAL_ATTN_5) {
1649                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1650                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1651                         }
1652                         if (asserted & ATTN_GENERAL_ATTN_6) {
1653                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1654                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1655                         }
1656                 }
1657
1658         } /* if hardwired */
1659
1660         DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
1661            asserted, BAR_IGU_INTMEM + igu_addr);
1662         REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
1663
1664         /* now set back the mask */
1665         if (asserted & ATTN_NIG_FOR_FUNC)
1666                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
1667 }
1668
1669 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1670 {
1671         int port = bp->port;
1672         int reg_offset;
1673         u32 val;
1674
1675         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1676
1677                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1678                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1679
1680                 val = REG_RD(bp, reg_offset);
1681                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1682                 REG_WR(bp, reg_offset, val);
1683
1684                 BNX2X_ERR("SPIO5 hw attention\n");
1685
1686                 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
1687                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
1688                         /* Fan failure attention */
1689
1690                         /* The PHY reset is controlled by GPIO 1 */
1691                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1692                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
1693                         /* Low power mode is controlled by GPIO 2 */
1694                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1695                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
1696                         /* mark the failure */
1697                         bp->link_params.ext_phy_config &=
1698                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1699                         bp->link_params.ext_phy_config |=
1700                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1701                         SHMEM_WR(bp,
1702                                  dev_info.port_hw_config[port].
1703                                                         external_phy_config,
1704                                  bp->link_params.ext_phy_config);
1705                         /* log the failure */
1706                         printk(KERN_ERR PFX "Fan Failure on Network"
1707                                " Controller %s has caused the driver to"
1708                                " shut down the card to prevent permanent"
1709                                " damage.  Please contact Dell Support for"
1710                                " assistance\n", bp->dev->name);
1711                         break;
1712
1713                 default:
1714                         break;
1715                 }
1716         }
1717 }
1718
1719 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
1720 {
1721         u32 val;
1722
1723         if (attn & BNX2X_DOORQ_ASSERT) {
1724
1725                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
1726                 BNX2X_ERR("DB hw attention 0x%x\n", val);
1727                 /* DORQ discard attention */
1728                 if (val & 0x2)
1729                         BNX2X_ERR("FATAL error from DORQ\n");
1730         }
1731 }
1732
1733 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
1734 {
1735         u32 val;
1736
1737         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
1738
1739                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
1740                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
1741                 /* CFC error attention */
1742                 if (val & 0x2)
1743                         BNX2X_ERR("FATAL error from CFC\n");
1744         }
1745
1746         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
1747
1748                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
1749                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
1750                 /* RQ_USDMDP_FIFO_OVERFLOW */
1751                 if (val & 0x18000)
1752                         BNX2X_ERR("FATAL error from PXP\n");
1753         }
1754 }
1755
1756 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
1757 {
1758         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
1759
1760                 if (attn & BNX2X_MC_ASSERT_BITS) {
1761
1762                         BNX2X_ERR("MC assert!\n");
1763                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
1764                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
1765                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
1766                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
1767                         bnx2x_panic();
1768
1769                 } else if (attn & BNX2X_MCP_ASSERT) {
1770
1771                         BNX2X_ERR("MCP assert!\n");
1772                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
1773                         bnx2x_mc_assert(bp);
1774
1775                 } else
1776                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
1777         }
1778
1779         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
1780
1781                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
1782                 BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
1783         }
1784 }
1785
1786 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
1787 {
1788         struct attn_route attn;
1789         struct attn_route group_mask;
1790         int port = bp->port;
1791         int index;
1792         u32 reg_addr;
1793         u32 val;
1794
1795         /* need to take HW lock because MCP or other port might also
1796            try to handle this event */
1797         bnx2x_lock_alr(bp);
1798
1799         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
1800         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
1801         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
1802         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
1803         DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
1804
1805         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
1806                 if (deasserted & (1 << index)) {
1807                         group_mask = bp->attn_group[index];
1808
1809                         DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
1810                            (unsigned long long)group_mask.sig[0]);
1811
1812                         bnx2x_attn_int_deasserted3(bp,
1813                                         attn.sig[3] & group_mask.sig[3]);
1814                         bnx2x_attn_int_deasserted1(bp,
1815                                         attn.sig[1] & group_mask.sig[1]);
1816                         bnx2x_attn_int_deasserted2(bp,
1817                                         attn.sig[2] & group_mask.sig[2]);
1818                         bnx2x_attn_int_deasserted0(bp,
1819                                         attn.sig[0] & group_mask.sig[0]);
1820
1821                         if ((attn.sig[0] & group_mask.sig[0] &
1822                                                 HW_INTERRUT_ASSERT_SET_0) ||
1823                             (attn.sig[1] & group_mask.sig[1] &
1824                                                 HW_INTERRUT_ASSERT_SET_1) ||
1825                             (attn.sig[2] & group_mask.sig[2] &
1826                                                 HW_INTERRUT_ASSERT_SET_2))
1827                                 BNX2X_ERR("FATAL HW block attention"
1828                                           "  set0 0x%x  set1 0x%x"
1829                                           "  set2 0x%x\n",
1830                                           (attn.sig[0] & group_mask.sig[0] &
1831                                            HW_INTERRUT_ASSERT_SET_0),
1832                                           (attn.sig[1] & group_mask.sig[1] &
1833                                            HW_INTERRUT_ASSERT_SET_1),
1834                                           (attn.sig[2] & group_mask.sig[2] &
1835                                            HW_INTERRUT_ASSERT_SET_2));
1836
1837                         if ((attn.sig[0] & group_mask.sig[0] &
1838                                                 HW_PRTY_ASSERT_SET_0) ||
1839                             (attn.sig[1] & group_mask.sig[1] &
1840                                                 HW_PRTY_ASSERT_SET_1) ||
1841                             (attn.sig[2] & group_mask.sig[2] &
1842                                                 HW_PRTY_ASSERT_SET_2))
1843                                BNX2X_ERR("FATAL HW block parity attention\n");
1844                 }
1845         }
1846
1847         bnx2x_unlock_alr(bp);
1848
1849         reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
1850
1851         val = ~deasserted;
1852 /*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
1853            val, BAR_IGU_INTMEM + reg_addr); */
1854         REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
1855
1856         if (bp->aeu_mask & (deasserted & 0xff))
1857                 BNX2X_ERR("IGU BUG\n");
1858         if (~bp->attn_state & deasserted)
1859                 BNX2X_ERR("IGU BUG\n");
1860
1861         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1862                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
1863
1864         DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
1865         bp->aeu_mask |= (deasserted & 0xff);
1866
1867         DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
1868         REG_WR(bp, reg_addr, bp->aeu_mask);
1869
1870         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1871         bp->attn_state &= ~deasserted;
1872         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1873 }
1874
1875 static void bnx2x_attn_int(struct bnx2x *bp)
1876 {
1877         /* read local copy of bits */
1878         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
1879         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
1880         u32 attn_state = bp->attn_state;
1881
1882         /* look for changed bits */
1883         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
1884         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
1885
1886         DP(NETIF_MSG_HW,
1887            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
1888            attn_bits, attn_ack, asserted, deasserted);
1889
1890         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
1891                 BNX2X_ERR("bad attention state\n");
1892
1893         /* handle bits that were raised */
1894         if (asserted)
1895                 bnx2x_attn_int_asserted(bp, asserted);
1896
1897         if (deasserted)
1898                 bnx2x_attn_int_deasserted(bp, deasserted);
1899 }
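
/* Editor's note: a worked example of the edge detection above. With
 * attn_bits = 0x5, attn_ack = 0x1 and attn_state = 0x1:
 *   asserted   =  0x5 & ~0x1 & ~0x1 = 0x4  (bit 2 newly raised)
 *   deasserted = ~0x5 &  0x1 &  0x1 = 0x0  (nothing cleared yet)
 * so only bnx2x_attn_int_asserted() runs on this pass.
 */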
1900
1901 static void bnx2x_sp_task(struct work_struct *work)
1902 {
1903         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
1904         u16 status;
1905
1906         /* Return here if interrupt is disabled */
1907         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1908                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
1909                 return;
1910         }
1911
1912         status = bnx2x_update_dsb_idx(bp);
1913         if (status == 0)
1914                 BNX2X_ERR("spurious slowpath interrupt!\n");
1915
1916         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
1917
1918         /* HW attentions */
1919         if (status & 0x1)
1920                 bnx2x_attn_int(bp);
1921
1922         /* CStorm events: query_stats, port delete ramrod */
1923         if (status & 0x2)
1924                 bp->stat_pending = 0;
1925
1926         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
1927                      IGU_INT_NOP, 1);
1928         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
1929                      IGU_INT_NOP, 1);
1930         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
1931                      IGU_INT_NOP, 1);
1932         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
1933                      IGU_INT_NOP, 1);
1934         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
1935                      IGU_INT_ENABLE, 1);
1936
1937 }
1938
1939 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
1940 {
1941         struct net_device *dev = dev_instance;
1942         struct bnx2x *bp = netdev_priv(dev);
1943
1944         /* Return here if interrupt is disabled */
1945         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1946                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
1947                 return IRQ_HANDLED;
1948         }
1949
1950         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
1951
1952 #ifdef BNX2X_STOP_ON_ERROR
1953         if (unlikely(bp->panic))
1954                 return IRQ_HANDLED;
1955 #endif
1956
1957         schedule_work(&bp->sp_task);
1958
1959         return IRQ_HANDLED;
1960 }
1961
1962 /* end of slow path */
1963
1964 /* Statistics */
1965
1966 /****************************************************************************
1967 * Macros
1968 ****************************************************************************/
1969
1970 #define UPDATE_STAT(s, t) \
1971         do { \
1972                 estats->t += new->s - old->s; \
1973                 old->s = new->s; \
1974         } while (0)
1975
1976 /* sum[hi:lo] += add[hi:lo] */
1977 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
1978         do { \
1979                 s_lo += a_lo; \
1980                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
1981         } while (0)
1982
1983 /* difference = minuend - subtrahend */
1984 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
1985         do { \
1986                 if (m_lo < s_lo) {      /* underflow */ \
1987                         d_hi = m_hi - s_hi; \
1988                         if (d_hi > 0) { /* we can 'loan' 1 */ \
1989                                 d_hi--; \
1990                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
1991                         } else {        /* m_hi <= s_hi */ \
1992                                 d_hi = 0; \
1993                                 d_lo = 0; \
1994                         } \
1995                 } else {                /* m_lo >= s_lo */ \
1996                         if (m_hi < s_hi) { \
1997                             d_hi = 0; \
1998                             d_lo = 0; \
1999                         } else {        /* m_hi >= s_hi */ \
2000                             d_hi = m_hi - s_hi; \
2001                             d_lo = m_lo - s_lo; \
2002                         } \
2003                 } \
2004         } while (0)
2005
2006 /* minuend -= subtrahend */
2007 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
2008         do { \
2009                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
2010         } while (0)
2011
2012 #define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
2013         do { \
2014                 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
2015                         diff.lo, new->s_lo, old->s_lo); \
2016                 old->s_hi = new->s_hi; \
2017                 old->s_lo = new->s_lo; \
2018                 ADD_64(estats->t_hi, diff.hi, \
2019                        estats->t_lo, diff.lo); \
2020         } while (0)
2021
2022 /* sum[hi:lo] += add */
2023 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2024         do { \
2025                 s_lo += a; \
2026                 s_hi += (s_lo < a) ? 1 : 0; \
2027         } while (0)
2028
2029 #define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
2030         do { \
2031                 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
2032         } while (0)
2033
2034 #define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
2035         do { \
2036                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2037                 old_tclient->s = le32_to_cpu(tclient->s); \
2038                 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
2039         } while (0)
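
/* Editor's sketch (compiled out): the carry behaviour of the split 64-bit
 * helpers above. Adding 1 to hi:lo = 0x0:0xffffffff must wrap lo to 0 and
 * carry into hi.
 */
#if 0
static void bnx2x_add_64_example(void)
{
        u32 hi = 0, lo = 0xffffffff;

        ADD_64(hi, 0, lo, 1);
        /* now hi == 1 and lo == 0 */
}
#endif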
2040
2041 /*
2042  * General service functions
2043  */
2044
2045 static inline long bnx2x_hilo(u32 *hiref)
2046 {
2047         u32 lo = *(hiref + 1);
2048 #if (BITS_PER_LONG == 64)
2049         u32 hi = *hiref;
2050
2051         return HILO_U64(hi, lo);
2052 #else
2053         return lo;
2054 #endif
2055 }
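
/* Editor's note: on 64-bit kernels bnx2x_hilo() returns the full 64-bit
 * counter; on 32-bit kernels the high dword is dropped, so values
 * reported through struct net_device_stats wrap at 4G there.
 */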
2056
2057 /*
2058  * Init service functions
2059  */
2060
2061 static void bnx2x_init_mac_stats(struct bnx2x *bp)
2062 {
2063         struct dmae_command *dmae;
2064         int port = bp->port;
2065         int loader_idx = port * 8;
2066         u32 opcode;
2067         u32 mac_addr;
2068
2069         bp->executer_idx = 0;
2070         if (bp->fw_mb) {
2071                 /* MCP */
2072                 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2073                           DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2074 #ifdef __BIG_ENDIAN
2075                           DMAE_CMD_ENDIANITY_B_DW_SWAP |
2076 #else
2077                           DMAE_CMD_ENDIANITY_DW_SWAP |
2078 #endif
2079                           (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2080
2081                 if (bp->link_vars.link_up)
2082                         opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
2083
2084                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2085                 dmae->opcode = opcode;
2086                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
2087                                            sizeof(u32));
2088                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
2089                                            sizeof(u32));
2090                 dmae->dst_addr_lo = bp->fw_mb >> 2;
2091                 dmae->dst_addr_hi = 0;
2092                 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
2093                              sizeof(u32)) >> 2;
2094                 if (bp->link_vars.link_up) {
2095                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2096                         dmae->comp_addr_hi = 0;
2097                         dmae->comp_val = 1;
2098                 } else {
2099                         dmae->comp_addr_lo = 0;
2100                         dmae->comp_addr_hi = 0;
2101                         dmae->comp_val = 0;
2102                 }
2103         }
2104
2105         if (!bp->link_vars.link_up) {
2106                 /* no need to collect statistics while the link is down */
2107                 return;
2108         }
2109
2110         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2111                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2112                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2113 #ifdef __BIG_ENDIAN
2114                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
2115 #else
2116                   DMAE_CMD_ENDIANITY_DW_SWAP |
2117 #endif
2118                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2119
2120         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2121
2122                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
2123                                    NIG_REG_INGRESS_BMAC0_MEM);
2124
2125                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
2126                    BIGMAC_REGISTER_TX_STAT_GTBYT */
2127                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2128                 dmae->opcode = opcode;
2129                 dmae->src_addr_lo = (mac_addr +
2130                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2131                 dmae->src_addr_hi = 0;
2132                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2133                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2134                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
2135                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2136                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2137                 dmae->comp_addr_hi = 0;
2138                 dmae->comp_val = 1;
2139
2140                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
2141                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
2142                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2143                 dmae->opcode = opcode;
2144                 dmae->src_addr_lo = (mac_addr +
2145                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2146                 dmae->src_addr_hi = 0;
2147                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2148                                         offsetof(struct bmac_stats, rx_gr64));
2149                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2150                                         offsetof(struct bmac_stats, rx_gr64));
2151                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
2152                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2153                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2154                 dmae->comp_addr_hi = 0;
2155                 dmae->comp_val = 1;
2156
2157         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
2158
2159                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
2160
2161                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
2162                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2163                 dmae->opcode = opcode;
2164                 dmae->src_addr_lo = (mac_addr +
2165                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
2166                 dmae->src_addr_hi = 0;
2167                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2168                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2169                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
2170                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2171                 dmae->comp_addr_hi = 0;
2172                 dmae->comp_val = 1;
2173
2174                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
2175                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2176                 dmae->opcode = opcode;
2177                 dmae->src_addr_lo = (mac_addr +
2178                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
2179                 dmae->src_addr_hi = 0;
2180                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2181                                            offsetof(struct emac_stats,
2182                                                     rx_falsecarriererrors));
2183                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2184                                            offsetof(struct emac_stats,
2185                                                     rx_falsecarriererrors));
2186                 dmae->len = 1;
2187                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2188                 dmae->comp_addr_hi = 0;
2189                 dmae->comp_val = 1;
2190
2191                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
2192                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2193                 dmae->opcode = opcode;
2194                 dmae->src_addr_lo = (mac_addr +
2195                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
2196                 dmae->src_addr_hi = 0;
2197                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2198                                            offsetof(struct emac_stats,
2199                                                     tx_ifhcoutoctets));
2200                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2201                                            offsetof(struct emac_stats,
2202                                                     tx_ifhcoutoctets));
2203                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
2204                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2205                 dmae->comp_addr_hi = 0;
2206                 dmae->comp_val = 1;
2207         }
2208
2209         /* NIG */
2210         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2211         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2212                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
2213                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2214 #ifdef __BIG_ENDIAN
2215                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
2216 #else
2217                         DMAE_CMD_ENDIANITY_DW_SWAP |
2218 #endif
2219                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2220         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
2221                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
2222         dmae->src_addr_hi = 0;
2223         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
2224         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
2225         dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
2226         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
2227                                     offsetof(struct nig_stats, done));
2228         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
2229                                     offsetof(struct nig_stats, done));
2230         dmae->comp_val = 0xffffffff;
2231 }
2232
2233 static void bnx2x_init_stats(struct bnx2x *bp)
2234 {
2235         int port = bp->port;
2236
2237         bp->stats_state = STATS_STATE_DISABLE;
2238         bp->executer_idx = 0;
2239
2240         bp->old_brb_discard = REG_RD(bp,
2241                                      NIG_REG_STAT0_BRB_DISCARD + port*0x38);
2242
2243         memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2244         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
2245         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
2246
2247         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
2248         REG_WR(bp, BAR_XSTRORM_INTMEM +
2249                XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2250
2251         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
2252         REG_WR(bp, BAR_TSTRORM_INTMEM +
2253                TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2254
2255         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
2256         REG_WR(bp, BAR_CSTRORM_INTMEM +
2257                CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2258
2259         REG_WR(bp, BAR_XSTRORM_INTMEM +
2260                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
2261                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2262         REG_WR(bp, BAR_XSTRORM_INTMEM +
2263                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
2264                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2265
2266         REG_WR(bp, BAR_TSTRORM_INTMEM +
2267                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
2268                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2269         REG_WR(bp, BAR_TSTRORM_INTMEM +
2270                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
2271                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2272 }
2273
2274 static void bnx2x_stop_stats(struct bnx2x *bp)
2275 {
2276         might_sleep();
2277         if (bp->stats_state != STATS_STATE_DISABLE) {
2278                 int timeout = 10;
2279
2280                 bp->stats_state = STATS_STATE_STOP;
2281                 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2282
2283                 while (bp->stats_state != STATS_STATE_DISABLE) {
2284                         if (!timeout) {
2285                                 BNX2X_ERR("timeout waiting for stats stop\n");
2286                                 break;
2287                         }
2288                         timeout--;
2289                         msleep(100);
2290                 }
2291         }
2292         DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
2293 }
2294
2295 /*
2296  * Statistics service functions
2297  */
2298
2299 static void bnx2x_update_bmac_stats(struct bnx2x *bp)
2300 {
2301         struct regp diff;
2302         struct regp sum;
2303         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
2304         struct bmac_stats *old = &bp->old_bmac;
2305         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2306
2307         sum.hi = 0;
2308         sum.lo = 0;
2309
2310         UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
2311                       tx_gtbyt.lo, total_bytes_transmitted_lo);
2312
2313         UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
2314                       tx_gtmca.lo, total_multicast_packets_transmitted_lo);
2315         ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
2316
2317         UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
2318                       tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
2319         ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
2320
2321         UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
2322                       tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
2323         SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
2324                estats->total_unicast_packets_transmitted_lo, sum.lo);
2325
2326         UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
2327         UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
2328         UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
2329         UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
2330         UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
2331         UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
2332         UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
2333         UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
2334         UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
2335         UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
2336         UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
2337
2338         UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
2339         UPDATE_STAT(rx_grund.lo, runt_packets_received);
2340         UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
2341         UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
2342         UPDATE_STAT(rx_grxcf.lo, control_frames_received);
2343         /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
2344         UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
2345         UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
2346
2347         UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
2348                       rx_grerb.lo, stat_IfHCInBadOctets_lo);
2349         UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
2350                       tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
2351         UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
2352         /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
2353         estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
2354 }
2355
2356 static void bnx2x_update_emac_stats(struct bnx2x *bp)
2357 {
2358         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
2359         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2360
2361         UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
2362                                              total_bytes_transmitted_lo);
2363         UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
2364                                         total_unicast_packets_transmitted_hi,
2365                                         total_unicast_packets_transmitted_lo);
2366         UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
2367                                       total_multicast_packets_transmitted_hi,
2368                                       total_multicast_packets_transmitted_lo);
2369         UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
2370                                       total_broadcast_packets_transmitted_hi,
2371                                       total_broadcast_packets_transmitted_lo);
2372
2373         estats->pause_xon_frames_transmitted += new->tx_outxonsent;
2374         estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
2375         estats->single_collision_transmit_frames +=
2376                                 new->tx_dot3statssinglecollisionframes;
2377         estats->multiple_collision_transmit_frames +=
2378                                 new->tx_dot3statsmultiplecollisionframes;
2379         estats->late_collision_frames += new->tx_dot3statslatecollisions;
2380         estats->excessive_collision_frames +=
2381                                 new->tx_dot3statsexcessivecollisions;
2382         estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
2383         estats->frames_transmitted_65_127_bytes +=
2384                                 new->tx_etherstatspkts65octetsto127octets;
2385         estats->frames_transmitted_128_255_bytes +=
2386                                 new->tx_etherstatspkts128octetsto255octets;
2387         estats->frames_transmitted_256_511_bytes +=
2388                                 new->tx_etherstatspkts256octetsto511octets;
2389         estats->frames_transmitted_512_1023_bytes +=
2390                                 new->tx_etherstatspkts512octetsto1023octets;
2391         estats->frames_transmitted_1024_1522_bytes +=
2392                                 new->tx_etherstatspkts1024octetsto1522octet;
2393         estats->frames_transmitted_1523_9022_bytes +=
2394                                 new->tx_etherstatspktsover1522octets;
2395
2396         estats->crc_receive_errors += new->rx_dot3statsfcserrors;
2397         estats->alignment_errors += new->rx_dot3statsalignmenterrors;
2398         estats->false_carrier_detections += new->rx_falsecarriererrors;
2399         estats->runt_packets_received += new->rx_etherstatsundersizepkts;
2400         estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
2401         estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
2402         estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
2403         estats->control_frames_received += new->rx_maccontrolframesreceived;
2404         estats->error_runt_packets_received += new->rx_etherstatsfragments;
2405         estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
2406
2407         UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
2408                                                stat_IfHCInBadOctets_lo);
2409         UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
2410                                                 stat_IfHCOutBadOctets_lo);
2411         estats->stat_Dot3statsInternalMacTransmitErrors +=
2412                                 new->tx_dot3statsinternalmactransmiterrors;
2413         estats->stat_Dot3StatsCarrierSenseErrors +=
2414                                 new->rx_dot3statscarriersenseerrors;
2415         estats->stat_Dot3StatsDeferredTransmissions +=
2416                                 new->tx_dot3statsdeferredtransmissions;
2417         estats->stat_FlowControlDone += new->tx_flowcontroldone;
2418         estats->stat_XoffStateEntered += new->rx_xoffstateentered;
2419 }
2420
2421 static int bnx2x_update_storm_stats(struct bnx2x *bp)
2422 {
2423         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
2424         struct tstorm_common_stats *tstats = &stats->tstorm_common;
2425         struct tstorm_per_client_stats *tclient =
2426                                                 &tstats->client_statistics[0];
2427         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
2428         struct xstorm_common_stats *xstats = &stats->xstorm_common;
2429         struct nig_stats *nstats = bnx2x_sp(bp, nig);
2430         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2431         u32 diff;
2432
2433         /* are DMAE stats valid? */
2434         if (nstats->done != 0xffffffff) {
2435                 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
2436                 return -1;
2437         }
2438
2439         /* are storm stats valid? */
2440         if (tstats->done.hi != 0xffffffff) {
2441                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
2442                 return -2;
2443         }
2444         if (xstats->done.hi != 0xffffffff) {
2445                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
2446                 return -3;
2447         }
2448
2449         estats->total_bytes_received_hi =
2450         estats->valid_bytes_received_hi =
2451                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
2452         estats->total_bytes_received_lo =
2453         estats->valid_bytes_received_lo =
2454                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
2455         ADD_64(estats->total_bytes_received_hi,
2456                le32_to_cpu(tclient->rcv_error_bytes.hi),
2457                estats->total_bytes_received_lo,
2458                le32_to_cpu(tclient->rcv_error_bytes.lo));
2459
2460         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
2461                                         total_unicast_packets_received_hi,
2462                                         total_unicast_packets_received_lo);
2463         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
2464                                         total_multicast_packets_received_hi,
2465                                         total_multicast_packets_received_lo);
2466         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
2467                                         total_broadcast_packets_received_hi,
2468                                         total_broadcast_packets_received_lo);
2469
2470         estats->frames_received_64_bytes = MAC_STX_NA;
2471         estats->frames_received_65_127_bytes = MAC_STX_NA;
2472         estats->frames_received_128_255_bytes = MAC_STX_NA;
2473         estats->frames_received_256_511_bytes = MAC_STX_NA;
2474         estats->frames_received_512_1023_bytes = MAC_STX_NA;
2475         estats->frames_received_1024_1522_bytes = MAC_STX_NA;
2476         estats->frames_received_1523_9022_bytes = MAC_STX_NA;
2477
2478         estats->x_total_sent_bytes_hi =
2479                                 le32_to_cpu(xstats->total_sent_bytes.hi);
2480         estats->x_total_sent_bytes_lo =
2481                                 le32_to_cpu(xstats->total_sent_bytes.lo);
2482         estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
2483
2484         estats->t_rcv_unicast_bytes_hi =
2485                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
2486         estats->t_rcv_unicast_bytes_lo =
2487                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
2488         estats->t_rcv_broadcast_bytes_hi =
2489                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
2490         estats->t_rcv_broadcast_bytes_lo =
2491                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
2492         estats->t_rcv_multicast_bytes_hi =
2493                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
2494         estats->t_rcv_multicast_bytes_lo =
2495                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
2496         estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
2497
2498         estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
2499         estats->packets_too_big_discard =
2500                                 le32_to_cpu(tclient->packets_too_big_discard);
2501         estats->jabber_packets_received = estats->packets_too_big_discard +
2502                                           estats->stat_Dot3statsFramesTooLong;
2503         estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
2504         estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
2505         estats->mac_discard = le32_to_cpu(tclient->mac_discard);
2506         estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
2507         estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
2508         estats->brb_truncate_discard =
2509                                 le32_to_cpu(tstats->brb_truncate_discard);
2510
2511         estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
2512         bp->old_brb_discard = nstats->brb_discard;
2513
2514         estats->brb_packet = nstats->brb_packet;
2515         estats->brb_truncate = nstats->brb_truncate;
2516         estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
2517         estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
2518         estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
2519         estats->mng_discard = nstats->mng_discard;
2520         estats->mng_octet_inp = nstats->mng_octet_inp;
2521         estats->mng_octet_out = nstats->mng_octet_out;
2522         estats->mng_packet_inp = nstats->mng_packet_inp;
2523         estats->mng_packet_out = nstats->mng_packet_out;
2524         estats->pbf_octets = nstats->pbf_octets;
2525         estats->pbf_packet = nstats->pbf_packet;
2526         estats->safc_inp = nstats->safc_inp;
2527
2528         xstats->done.hi = 0;
2529         tstats->done.hi = 0;
2530         nstats->done = 0;
2531
2532         return 0;
2533 }
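
/* Editor's note: the "done" fields act as completion sentinels. The DMAE
 * engine and the storm firmware are expected to write 0xffffffff once a
 * snapshot is complete; the function above consumes the snapshot and then
 * clears the sentinels so a stale pass returns early next time.
 */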
2534
2535 static void bnx2x_update_net_stats(struct bnx2x *bp)
2536 {
2537         struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2538         struct net_device_stats *nstats = &bp->dev->stats;
2539
2540         nstats->rx_packets =
2541                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
2542                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
2543                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
2544
2545         nstats->tx_packets =
2546                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
2547                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
2548                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
2549
2550         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
2551
2552         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
2553
2554         nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
2555         nstats->tx_dropped = 0;
2556
2557         nstats->multicast =
2558                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
2559
2560         nstats->collisions = estats->single_collision_transmit_frames +
2561                              estats->multiple_collision_transmit_frames +
2562                              estats->late_collision_frames +
2563                              estats->excessive_collision_frames;
2564
2565         nstats->rx_length_errors = estats->runt_packets_received +
2566                                    estats->jabber_packets_received;
2567         nstats->rx_over_errors = estats->brb_discard +
2568                                  estats->brb_truncate_discard;
2569         nstats->rx_crc_errors = estats->crc_receive_errors;
2570         nstats->rx_frame_errors = estats->alignment_errors;
2571         nstats->rx_fifo_errors = estats->no_buff_discard;
2572         nstats->rx_missed_errors = estats->xxoverflow_discard;
2573
2574         nstats->rx_errors = nstats->rx_length_errors +
2575                             nstats->rx_over_errors +
2576                             nstats->rx_crc_errors +
2577                             nstats->rx_frame_errors +
2578                             nstats->rx_fifo_errors +
2579                             nstats->rx_missed_errors;
2580
2581         nstats->tx_aborted_errors = estats->late_collision_frames +
2582                                     estats->excessive_collision_frames;
2583         nstats->tx_carrier_errors = estats->false_carrier_detections;
2584         nstats->tx_fifo_errors = 0;
2585         nstats->tx_heartbeat_errors = 0;
2586         nstats->tx_window_errors = 0;
2587
2588         nstats->tx_errors = nstats->tx_aborted_errors +
2589                             nstats->tx_carrier_errors;
2590
2591         estats->mac_stx_start = ++estats->mac_stx_end;
2592 }
2593
2594 static void bnx2x_update_stats(struct bnx2x *bp)
2595 {
2596         int i;
2597
2598         if (!bnx2x_update_storm_stats(bp)) {
2599
2600                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2601                         bnx2x_update_bmac_stats(bp);
2602
2603                 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
2604                         bnx2x_update_emac_stats(bp);
2605
2606                 } else { /* unreached */
2607                         BNX2X_ERR("no MAC active\n");
2608                         return;
2609                 }
2610
2611                 bnx2x_update_net_stats(bp);
2612         }
2613
2614         if (bp->msglevel & NETIF_MSG_TIMER) {
2615                 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2616                 struct net_device_stats *nstats = &bp->dev->stats;
2617
2618                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
2619                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
2620                                   "  tx pkt (%lx)\n",
2621                        bnx2x_tx_avail(bp->fp),
2622                        *bp->fp->tx_cons_sb, nstats->tx_packets);
2623                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
2624                                   "  rx pkt (%lx)\n",
2625                        (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
2626                        *bp->fp->rx_cons_sb, nstats->rx_packets);
2627                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
2628                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
2629                        estats->driver_xoff, estats->brb_discard);
2630                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
2631                         "packets_too_big_discard %u  no_buff_discard %u  "
2632                         "mac_discard %u  mac_filter_discard %u  "
2633                         "xxoverflow_discard %u  brb_truncate_discard %u  "
2634                         "ttl0_discard %u\n",
2635                        estats->checksum_discard,
2636                        estats->packets_too_big_discard,
2637                        estats->no_buff_discard, estats->mac_discard,
2638                        estats->mac_filter_discard, estats->xxoverflow_discard,
2639                        estats->brb_truncate_discard, estats->ttl0_discard);
2640
2641                 for_each_queue(bp, i) {
2642                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
2643                                bnx2x_fp(bp, i, tx_pkt),
2644                                bnx2x_fp(bp, i, rx_pkt),
2645                                bnx2x_fp(bp, i, rx_calls));
2646                 }
2647         }
2648
2649         if (bp->state != BNX2X_STATE_OPEN) {
2650                 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
2651                 return;
2652         }
2653
2654 #ifdef BNX2X_STOP_ON_ERROR
2655         if (unlikely(bp->panic))
2656                 return;
2657 #endif
2658
2659         /* loader */
2660         if (bp->executer_idx) {
2661                 struct dmae_command *dmae = &bp->dmae;
2662                 int port = bp->port;
2663                 int loader_idx = port * 8;
2664
2665                 memset(dmae, 0, sizeof(struct dmae_command));
2666
2667                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2668                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2669                                 DMAE_CMD_DST_RESET |
2670 #ifdef __BIG_ENDIAN
2671                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2672 #else
2673                                 DMAE_CMD_ENDIANITY_DW_SWAP |
2674 #endif
2675                                 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2676                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
2677                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
2678                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
2679                                      sizeof(struct dmae_command) *
2680                                      (loader_idx + 1)) >> 2;
2681                 dmae->dst_addr_hi = 0;
2682                 dmae->len = sizeof(struct dmae_command) >> 2;
2683                 dmae->len--;    /* !!! for A0/1 only */
2684                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
2685                 dmae->comp_addr_hi = 0;
2686                 dmae->comp_val = 1;
2687
2688                 bnx2x_post_dmae(bp, dmae, loader_idx);
2689         }
2690
2691         if (bp->stats_state != STATS_STATE_ENABLE) {
2692                 bp->stats_state = STATS_STATE_DISABLE;
2693                 return;
2694         }
2695
2696         if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
2697                 /* stats ramrod has its own slot on the spe */
2698                 bp->spq_left++;
2699                 bp->stat_pending = 1;
2700         }
2701 }
2702
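/* Periodic driver timer: in poll mode it services the first queue's TX
 * and RX rings directly, it keeps the driver-pulse heartbeat to the
 * management CPU (MCP) alive unless nomcp is set, and it kicks the
 * statistics update.  It re-arms itself every bp->current_interval.
 */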
2703 static void bnx2x_timer(unsigned long data)
2704 {
2705         struct bnx2x *bp = (struct bnx2x *) data;
2706
2707         if (!netif_running(bp->dev))
2708                 return;
2709
2710         if (atomic_read(&bp->intr_sem) != 0)
2711                 goto timer_restart;
2712
2713         if (poll) {
2714                 struct bnx2x_fastpath *fp = &bp->fp[0];
2715                 int rc;
2716
2717                 bnx2x_tx_int(fp, 1000);
2718                 rc = bnx2x_rx_int(fp, 1000);
2719         }
2720
2721         if (!nomcp) {
2722                 int port = bp->port;
2723                 u32 drv_pulse;
2724                 u32 mcp_pulse;
2725
2726                 ++bp->fw_drv_pulse_wr_seq;
2727                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2728                 /* TBD - add SYSTEM_TIME */
2729                 drv_pulse = bp->fw_drv_pulse_wr_seq;
2730                 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
2731
2732                 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
2733                              MCP_PULSE_SEQ_MASK);
2734                 /* The delta between driver pulse and mcp response
2735                  * should be 1 (before mcp response) or 0 (after mcp response)
2736                  */
2737                 if ((drv_pulse != mcp_pulse) &&
2738                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2739                         /* someone lost a heartbeat... */
2740                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2741                                   drv_pulse, mcp_pulse);
2742                 }
2743         }
2744
2745         if (bp->stats_state == STATS_STATE_DISABLE)
2746                 goto timer_restart;
2747
2748         bnx2x_update_stats(bp);
2749
2750 timer_restart:
2751         mod_timer(&bp->timer, jiffies + bp->current_interval);
2752 }
2753
2754 /* end of Statistics */
2755
2756 /* nic init */
2757
2758 /*
2759  * nic init service functions
2760  */
2761
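/* Set up one per-queue host status block: program the addresses of its
 * USTORM and CSTORM sections into the storms' internal memory, start
 * with host coalescing disabled for every index (bnx2x_update_coalesce
 * re-enables it later) and ACK the IGU to enable the interrupt.
 */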
2762 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2763                           dma_addr_t mapping, int id)
2764 {
2765         int port = bp->port;
2766         u64 section;
2767         int index;
2768
2769         /* USTORM */
2770         section = ((u64)mapping) + offsetof(struct host_status_block,
2771                                             u_status_block);
2772         sb->u_status_block.status_block_id = id;
2773
2774         REG_WR(bp, BAR_USTRORM_INTMEM +
2775                USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
2776         REG_WR(bp, BAR_USTRORM_INTMEM +
2777                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
2778                U64_HI(section));
2779
2780         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2781                 REG_WR16(bp, BAR_USTRORM_INTMEM +
2782                          USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
2783
2784         /* CSTORM */
2785         section = ((u64)mapping) + offsetof(struct host_status_block,
2786                                             c_status_block);
2787         sb->c_status_block.status_block_id = id;
2788
2789         REG_WR(bp, BAR_CSTRORM_INTMEM +
2790                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
2791         REG_WR(bp, BAR_CSTRORM_INTMEM +
2792                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
2793                U64_HI(section));
2794
2795         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2796                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2797                          CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
2798
2799         bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2800 }
2801
2802 static void bnx2x_init_def_sb(struct bnx2x *bp,
2803                               struct host_def_status_block *def_sb,
2804                               dma_addr_t mapping, int id)
2805 {
2806         int port = bp->port;
2807         int index, val, reg_offset;
2808         u64 section;
2809
2810         /* ATTN */
2811         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2812                                             atten_status_block);
2813         def_sb->atten_status_block.status_block_id = id;
2814
2815         bp->def_att_idx = 0;
2816         bp->attn_state = 0;
2817
2818         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2819                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2820
2821         for (index = 0; index < 3; index++) {
2822                 bp->attn_group[index].sig[0] = REG_RD(bp,
2823                                                      reg_offset + 0x10*index);
2824                 bp->attn_group[index].sig[1] = REG_RD(bp,
2825                                                reg_offset + 0x4 + 0x10*index);
2826                 bp->attn_group[index].sig[2] = REG_RD(bp,
2827                                                reg_offset + 0x8 + 0x10*index);
2828                 bp->attn_group[index].sig[3] = REG_RD(bp,
2829                                                reg_offset + 0xc + 0x10*index);
2830         }
2831
2832         bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2833                                           MISC_REG_AEU_MASK_ATTN_FUNC_0));
2834
2835         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2836                              HC_REG_ATTN_MSG0_ADDR_L);
2837
2838         REG_WR(bp, reg_offset, U64_LO(section));
2839         REG_WR(bp, reg_offset + 4, U64_HI(section));
2840
2841         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2842
2843         val = REG_RD(bp, reg_offset);
2844         val |= id;
2845         REG_WR(bp, reg_offset, val);
2846
2847         /* USTORM */
2848         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2849                                             u_def_status_block);
2850         def_sb->u_def_status_block.status_block_id = id;
2851
2852         bp->def_u_idx = 0;
2853
2854         REG_WR(bp, BAR_USTRORM_INTMEM +
2855                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2856         REG_WR(bp, BAR_USTRORM_INTMEM +
2857                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2858                U64_HI(section));
2859         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
2860                BNX2X_BTR);
2861
2862         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2863                 REG_WR16(bp, BAR_USTRORM_INTMEM +
2864                          USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2865
2866         /* CSTORM */
2867         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2868                                             c_def_status_block);
2869         def_sb->c_def_status_block.status_block_id = id;
2870
2871         bp->def_c_idx = 0;
2872
2873         REG_WR(bp, BAR_CSTRORM_INTMEM +
2874                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2875         REG_WR(bp, BAR_CSTRORM_INTMEM +
2876                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2877                U64_HI(section));
2878         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
2879                BNX2X_BTR);
2880
2881         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2882                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2883                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2884
2885         /* TSTORM */
2886         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2887                                             t_def_status_block);
2888         def_sb->t_def_status_block.status_block_id = id;
2889
2890         bp->def_t_idx = 0;
2891
2892         REG_WR(bp, BAR_TSTRORM_INTMEM +
2893                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2894         REG_WR(bp, BAR_TSTRORM_INTMEM +
2895                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2896                U64_HI(section));
2897         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
2898                BNX2X_BTR);
2899
2900         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2901                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2902                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2903
2904         /* XSTORM */
2905         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2906                                             x_def_status_block);
2907         def_sb->x_def_status_block.status_block_id = id;
2908
2909         bp->def_x_idx = 0;
2910
2911         REG_WR(bp, BAR_XSTRORM_INTMEM +
2912                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2913         REG_WR(bp, BAR_XSTRORM_INTMEM +
2914                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2915                U64_HI(section));
2916         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
2917                BNX2X_BTR);
2918
2919         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2920                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2921                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2922
2923         bp->stat_pending = 0;
2924
2925         bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2926 }
2927
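/* Program the host-coalescing timeouts for the RX and TX CQ indices.
 * The division by 12 suggests the HC timeout registers count in 12 us
 * units (an assumption); a tick value of 0 disables HC for that index.
 */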
2928 static void bnx2x_update_coalesce(struct bnx2x *bp)
2929 {
2930         int port = bp->port;
2931         int i;
2932
2933         for_each_queue(bp, i) {
2934
2935                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2936                 REG_WR8(bp, BAR_USTRORM_INTMEM +
2937                         USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
2938                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
2939                         bp->rx_ticks_int/12);
2940                 REG_WR16(bp, BAR_USTRORM_INTMEM +
2941                          USTORM_SB_HC_DISABLE_OFFSET(port, i,
2942                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
2943                          bp->rx_ticks_int ? 0 : 1);
2944
2945                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2946                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2947                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
2948                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
2949                         bp->tx_ticks_int/12);
2950                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2951                          CSTORM_SB_HC_DISABLE_OFFSET(port, i,
2952                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
2953                          bp->tx_ticks_int ? 0 : 1);
2954         }
2955 }
2956
2957 static void bnx2x_init_rx_rings(struct bnx2x *bp)
2958 {
2959         u16 ring_prod;
2960         int i, j;
2961         int port = bp->port;
2962
2963         bp->rx_buf_use_size = bp->dev->mtu;
2964
2965         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
2966         bp->rx_buf_size = bp->rx_buf_use_size + 64;
2967
2968         for_each_queue(bp, j) {
2969                 struct bnx2x_fastpath *fp = &bp->fp[j];
2970
2971                 fp->rx_bd_cons = 0;
2972                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
2973
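                /* chain the BD pages: the final descriptor slots of each
                   page are reserved for a pointer to the next page, so the
                   NUM_RX_RINGS pages form one circular ring */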
2974                 for (i = 1; i <= NUM_RX_RINGS; i++) {
2975                         struct eth_rx_bd *rx_bd;
2976
2977                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
2978                         rx_bd->addr_hi =
2979                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
2980                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
2981                         rx_bd->addr_lo =
2982                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
2983                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
2984
2985                 }
2986
2987                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
2988                         struct eth_rx_cqe_next_page *nextpg;
2989
2990                         nextpg = (struct eth_rx_cqe_next_page *)
2991                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
2992                         nextpg->addr_hi =
2993                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
2994                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
2995                         nextpg->addr_lo =
2996                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
2997                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
2998                 }
2999
3000                 /* rx completion queue */
3001                 fp->rx_comp_cons = ring_prod = 0;
3002
3003                 for (i = 0; i < bp->rx_ring_size; i++) {
3004                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
3005                                 BNX2X_ERR("was only able to allocate "
3006                                           "%d rx skbs\n", i);
3007                                 break;
3008                         }
3009                         ring_prod = NEXT_RX_IDX(ring_prod);
3010                         BUG_TRAP(ring_prod > i);
3011                 }
3012
3013                 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
3014                 fp->rx_pkt = fp->rx_calls = 0;
3015
3016                 /* Warning! This will generate an interrupt (to the TSTORM)
3017                    and must only be done when the chip is initialized */
3018                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3019                        TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
3020                 if (j != 0)
3021                         continue;
3022
3023                 REG_WR(bp, BAR_USTRORM_INTMEM +
3024                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
3025                        U64_LO(fp->rx_comp_mapping));
3026                 REG_WR(bp, BAR_USTRORM_INTMEM +
3027                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
3028                        U64_HI(fp->rx_comp_mapping));
3029         }
3030 }
3031
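/* The TX rings use the same paged layout as RX: the last BD of each
 * page links to the next page, and all producer/consumer indices and
 * counters start at zero.
 */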
3032 static void bnx2x_init_tx_ring(struct bnx2x *bp)
3033 {
3034         int i, j;
3035
3036         for_each_queue(bp, j) {
3037                 struct bnx2x_fastpath *fp = &bp->fp[j];
3038
3039                 for (i = 1; i <= NUM_TX_RINGS; i++) {
3040                         struct eth_tx_bd *tx_bd =
3041                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
3042
3043                         tx_bd->addr_hi =
3044                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
3045                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
3046                         tx_bd->addr_lo =
3047                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
3048                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
3049                 }
3050
3051                 fp->tx_pkt_prod = 0;
3052                 fp->tx_pkt_cons = 0;
3053                 fp->tx_bd_prod = 0;
3054                 fp->tx_bd_cons = 0;
3055                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
3056                 fp->tx_pkt = 0;
3057         }
3058 }
3059
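/* Slowpath (SP) queue: a single page of MAX_SP_DESC_CNT command BDs
 * guarded by spq_lock with MAX_SPQ_PENDING credits; its base address
 * and initial producer index are handed to the XSTORM.
 */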
3060 static void bnx2x_init_sp_ring(struct bnx2x *bp)
3061 {
3062         int port = bp->port;
3063
3064         spin_lock_init(&bp->spq_lock);
3065
3066         bp->spq_left = MAX_SPQ_PENDING;
3067         bp->spq_prod_idx = 0;
3068         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
3069         bp->spq_prod_bd = bp->spq;
3070         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
3071
3072         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
3073                U64_LO(bp->spq_mapping));
3074         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
3075                U64_HI(bp->spq_mapping));
3076
3077         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
3078                bp->spq_prod_idx);
3079 }
3080
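/* Fill the per-connection ETH context: the XSTORM side gets the TX BD
 * page and doorbell data addresses, the USTORM side the RX BD and RCQ
 * pages, and each queue is bound to its status block index numbers.
 */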
3081 static void bnx2x_init_context(struct bnx2x *bp)
3082 {
3083         int i;
3084
3085         for_each_queue(bp, i) {
3086                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
3087                 struct bnx2x_fastpath *fp = &bp->fp[i];
3088
3089                 context->xstorm_st_context.tx_bd_page_base_hi =
3090                                                 U64_HI(fp->tx_desc_mapping);
3091                 context->xstorm_st_context.tx_bd_page_base_lo =
3092                                                 U64_LO(fp->tx_desc_mapping);
3093                 context->xstorm_st_context.db_data_addr_hi =
3094                                                 U64_HI(fp->tx_prods_mapping);
3095                 context->xstorm_st_context.db_data_addr_lo =
3096                                                 U64_LO(fp->tx_prods_mapping);
3097
3098                 context->ustorm_st_context.rx_bd_page_base_hi =
3099                                                 U64_HI(fp->rx_desc_mapping);
3100                 context->ustorm_st_context.rx_bd_page_base_lo =
3101                                                 U64_LO(fp->rx_desc_mapping);
3102                 context->ustorm_st_context.status_block_id = i;
3103                 context->ustorm_st_context.sb_index_number =
3104                                                 HC_INDEX_U_ETH_RX_CQ_CONS;
3105                 context->ustorm_st_context.rcq_base_address_hi =
3106                                                 U64_HI(fp->rx_comp_mapping);
3107                 context->ustorm_st_context.rcq_base_address_lo =
3108                                                 U64_LO(fp->rx_comp_mapping);
3109                 context->ustorm_st_context.flags =
3110                                 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
3111                 context->ustorm_st_context.mc_alignment_size = 64;
3112                 context->ustorm_st_context.num_rss = bp->num_queues;
3113
3114                 context->cstorm_st_context.sb_index_number =
3115                                                 HC_INDEX_C_ETH_TX_CQ_CONS;
3116                 context->cstorm_st_context.status_block_id = i;
3117
3118                 context->xstorm_ag_context.cdu_reserved =
3119                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3120                                                CDU_REGION_NUMBER_XCM_AG,
3121                                                ETH_CONNECTION_TYPE);
3122                 context->ustorm_ag_context.cdu_usage =
3123                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3124                                                CDU_REGION_NUMBER_UCM_AG,
3125                                                ETH_CONNECTION_TYPE);
3126         }
3127 }
3128
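/* RSS indirection table: in multi-queue mode, spread the hash buckets
 * over the active queues round-robin.
 */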
3129 static void bnx2x_init_ind_table(struct bnx2x *bp)
3130 {
3131         int port = bp->port;
3132         int i;
3133
3134         if (!is_multi(bp))
3135                 return;
3136
3137         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3138                 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
3139                         i % bp->num_queues);
3140
3141         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3142 }
3143
3144 static void bnx2x_set_client_config(struct bnx2x *bp)
3145 {
3146         /* note: mode is also used outside the BCM_VLAN block below,
3147            so declare it unconditionally */
3148         int mode = bp->rx_mode;
3149         int i, port = bp->port;
3150         struct tstorm_eth_client_config tstorm_client = {0};
3151
3152         tstorm_client.mtu = bp->dev->mtu;
3153         tstorm_client.statistics_counter_id = 0;
3154         tstorm_client.config_flags =
3155                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
3156 #ifdef BCM_VLAN
3157         if (mode && bp->vlgrp) {
3158                 tstorm_client.config_flags |=
3159                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
3160                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3161         }
3162 #endif
3163         if (mode != BNX2X_RX_MODE_PROMISC)
3164                 tstorm_client.drop_flags =
3165                                 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
3166
3167         for_each_queue(bp, i) {
3168                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3169                        TSTORM_CLIENT_CONFIG_OFFSET(port, i),
3170                        ((u32 *)&tstorm_client)[0]);
3171                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3172                        TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
3173                        ((u32 *)&tstorm_client)[1]);
3174         }
3175
3176 /*      DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
3177            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
3178 }
3179
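/* Translate the driver rx_mode into TSTORM MAC filter flags and write
 * the resulting structure word by word into TSTORM internal memory.
 */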
3180 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3181 {
3182         int mode = bp->rx_mode;
3183         int port = bp->port;
3184         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3185         int i;
3186
3187         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
3188
3189         switch (mode) {
3190         case BNX2X_RX_MODE_NONE: /* no Rx */
3191                 tstorm_mac_filter.ucast_drop_all = 1;
3192                 tstorm_mac_filter.mcast_drop_all = 1;
3193                 tstorm_mac_filter.bcast_drop_all = 1;
3194                 break;
3195         case BNX2X_RX_MODE_NORMAL:
3196                 tstorm_mac_filter.bcast_accept_all = 1;
3197                 break;
3198         case BNX2X_RX_MODE_ALLMULTI:
3199                 tstorm_mac_filter.mcast_accept_all = 1;
3200                 tstorm_mac_filter.bcast_accept_all = 1;
3201                 break;
3202         case BNX2X_RX_MODE_PROMISC:
3203                 tstorm_mac_filter.ucast_accept_all = 1;
3204                 tstorm_mac_filter.mcast_accept_all = 1;
3205                 tstorm_mac_filter.bcast_accept_all = 1;
3206                 break;
3207         default:
3208                 BNX2X_ERR("bad rx mode (%d)\n", mode);
3209         }
3210
3211         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3212                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3213                        TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
3214                        ((u32 *)&tstorm_mac_filter)[i]);
3215
3216 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3217                    ((u32 *)&tstorm_mac_filter)[i]); */
3218         }
3219
3220         if (mode != BNX2X_RX_MODE_NONE)
3221                 bnx2x_set_client_config(bp);
3222 }
3223
3224 static void bnx2x_init_internal(struct bnx2x *bp)
3225 {
3226         int port = bp->port;
3227         struct tstorm_eth_function_common_config tstorm_config = {0};
3228         struct stats_indication_flags stats_flags = {0};
3229
3230         if (is_multi(bp)) {
3231                 tstorm_config.config_flags = MULTI_FLAGS;
3232                 tstorm_config.rss_result_mask = MULTI_MASK;
3233         }
3234
3235         REG_WR(bp, BAR_TSTRORM_INTMEM +
3236                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
3237                (*(u32 *)&tstorm_config));
3238
3239 /*      DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
3240            (*(u32 *)&tstorm_config)); */
3241
3242         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3243         bnx2x_set_storm_rx_mode(bp);
3244
3245         stats_flags.collect_eth = cpu_to_le32(1);
3246
3247         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
3248                ((u32 *)&stats_flags)[0]);
3249         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
3250                ((u32 *)&stats_flags)[1]);
3251
3252         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
3253                ((u32 *)&stats_flags)[0]);
3254         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
3255                ((u32 *)&stats_flags)[1]);
3256
3257         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
3258                ((u32 *)&stats_flags)[0]);
3259         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
3260                ((u32 *)&stats_flags)[1]);
3261
3262 /*      DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
3263            ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
3264 }
3265
3266 static void bnx2x_nic_init(struct bnx2x *bp)
3267 {
3268         int i;
3269
3270         for_each_queue(bp, i) {
3271                 struct bnx2x_fastpath *fp = &bp->fp[i];
3272
3273                 fp->state = BNX2X_FP_STATE_CLOSED;
3274                 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
3275                    bp, fp->status_blk, i);
3276                 fp->index = i;
3277                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
3278         }
3279
3280         bnx2x_init_def_sb(bp, bp->def_status_blk,
3281                           bp->def_status_blk_mapping, 0x10);
3282         bnx2x_update_coalesce(bp);
3283         bnx2x_init_rx_rings(bp);
3284         bnx2x_init_tx_ring(bp);
3285         bnx2x_init_sp_ring(bp);
3286         bnx2x_init_context(bp);
3287         bnx2x_init_internal(bp);
3288         bnx2x_init_stats(bp);
3289         bnx2x_init_ind_table(bp);
3290         bnx2x_int_enable(bp);
3291
3292 }
3293
3294 /* end of nic init */
3295
3296 /*
3297  * gzip service functions
3298  */
3299
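/* The firmware is carried in the driver image as gzip data and is
 * inflated with the in-kernel zlib into a DMA-coherent buffer of
 * FW_BUF_SIZE bytes (bp->gunzip_buf).
 */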
3300 static int bnx2x_gunzip_init(struct bnx2x *bp)
3301 {
3302         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
3303                                               &bp->gunzip_mapping);
3304         if (bp->gunzip_buf == NULL)
3305                 goto gunzip_nomem1;
3306
3307         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3308         if (bp->strm == NULL)
3309                 goto gunzip_nomem2;
3310
3311         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3312                                       GFP_KERNEL);
3313         if (bp->strm->workspace == NULL)
3314                 goto gunzip_nomem3;
3315
3316         return 0;
3317
3318 gunzip_nomem3:
3319         kfree(bp->strm);
3320         bp->strm = NULL;
3321
3322 gunzip_nomem2:
3323         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
3324                             bp->gunzip_mapping);
3325         bp->gunzip_buf = NULL;
3326
3327 gunzip_nomem1:
3328         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
3329                " decompression\n", bp->dev->name);
3330         return -ENOMEM;
3331 }
3332
3333 static void bnx2x_gunzip_end(struct bnx2x *bp)
3334 {
3335         kfree(bp->strm->workspace);
3336
3337         kfree(bp->strm);
3338         bp->strm = NULL;
3339
3340         if (bp->gunzip_buf) {
3341                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
3342                                     bp->gunzip_mapping);
3343                 bp->gunzip_buf = NULL;
3344         }
3345 }
3346
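/* Inflate one gzip image: the 10-byte gzip header (plus any FNAME
 * field) is parsed by hand and zlib runs in raw-deflate mode (negative
 * windowBits), since only the deflate payload is of interest.
 */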
3347 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
3348 {
3349         int n, rc;
3350
3351         /* check the gzip header: 0x1f 0x8b magic, Z_DEFLATED method */
3352         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
3353                 return -EINVAL;
3354
3355         n = 10;         /* fixed 10-byte gzip header */
3356
3357 #define FNAME                           0x8
3358
3359         if (zbuf[3] & FNAME)    /* skip the NUL-terminated file name */
3360                 while ((zbuf[n++] != 0) && (n < len));
3361
3362         bp->strm->next_in = zbuf + n;
3363         bp->strm->avail_in = len - n;
3364         bp->strm->next_out = bp->gunzip_buf;
3365         bp->strm->avail_out = FW_BUF_SIZE;
3366
3367         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);   /* raw deflate */
3368         if (rc != Z_OK)
3369                 return rc;
3370
3371         rc = zlib_inflate(bp->strm, Z_FINISH);
3372         if ((rc != Z_OK) && (rc != Z_STREAM_END))
3373                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
3374                        bp->dev->name, bp->strm->msg);
3375
3376         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3377         if (bp->gunzip_outlen & 0x3)
3378                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
3379                                     " gunzip_outlen (%d) not aligned\n",
3380                        bp->dev->name, bp->gunzip_outlen);
3381         bp->gunzip_outlen >>= 2;
3382
3383         zlib_inflateEnd(bp->strm);
3384
3385         if (rc == Z_STREAM_END)
3386                 return 0;
3387
3388         return rc;
3389 }
3390
3391 /* nic load/unload */
3392
3393 /*
3394  * general service functions
3395  */
3396
3397 /* send a NIG loopback debug packet */
3398 static void bnx2x_lb_pckt(struct bnx2x *bp)
3399 {
3400 #ifdef USE_DMAE
3401         u32 wb_write[3];
3402 #endif
3403
3404         /* Ethernet source and destination addresses */
3405 #ifdef USE_DMAE
3406         wb_write[0] = 0x55555555;
3407         wb_write[1] = 0x55555555;
3408         wb_write[2] = 0x20;             /* SOP */
3409         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3410 #else
3411         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
3412         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
3413         /* SOP */
3414         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
3415 #endif
3416
3417         /* NON-IP protocol */
3418 #ifdef USE_DMAE
3419         wb_write[0] = 0x09000000;
3420         wb_write[1] = 0x55555555;
3421         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
3422         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3423 #else
3424         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
3425         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
3426         /* EOP, eop_bvalid = 0 */
3427         REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
3428 #endif
3429 }
3430
3431 /* Some of the internal memories
3432  * are not directly readable from the driver.
3433  * To test them we send debug packets.
3434  */
3435 static int bnx2x_int_mem_test(struct bnx2x *bp)
3436 {
3437         int factor;
3438         int count, i;
3439         u32 val = 0;
3440
3441         switch (CHIP_REV(bp)) {
3442         case CHIP_REV_EMUL:
3443                 factor = 200;
3444                 break;
3445         case CHIP_REV_FPGA:
3446                 factor = 120;
3447                 break;
3448         default:
3449                 factor = 1;
3450                 break;
3451         }
3452
3453         DP(NETIF_MSG_HW, "start part1\n");
3454
3455         /* Disable inputs of parser neighbor blocks */
3456         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3457         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3458         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3459         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
3460
3461         /*  Write 0 to parser credits for CFC search request */
3462         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3463
3464         /* send Ethernet packet */
3465         bnx2x_lb_pckt(bp);
3466
3467         /* TODO: do we need to reset the NIG statistics? */
3468         /* Wait until NIG register shows 1 packet of size 0x10 */
3469         count = 1000 * factor;
3470         while (count) {
3471 #ifdef BNX2X_DMAE_RD
3472                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3473                 val = *bnx2x_sp(bp, wb_data[0]);
3474 #else
3475                 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3476                 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3477 #endif
3478                 if (val == 0x10)
3479                         break;
3480
3481                 msleep(10);
3482                 count--;
3483         }
3484         if (val != 0x10) {
3485                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3486                 return -1;
3487         }
3488
3489         /* Wait until PRS register shows 1 packet */
3490         count = 1000 * factor;
3491         while (count) {
3492                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3493
3494                 if (val == 1)
3495                         break;
3496
3497                 msleep(10);
3498                 count--;
3499         }
3500         if (val != 0x1) {
3501                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3502                 return -2;
3503         }
3504
3505         /* Reset and init BRB, PRS */
3506         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
3507         msleep(50);
3508         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
3509         msleep(50);
3510         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3511         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3512
3513         DP(NETIF_MSG_HW, "part2\n");
3514
3515         /* Disable inputs of parser neighbor blocks */
3516         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3517         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3518         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3519         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
3520
3521         /* Write 0 to parser credits for CFC search request */
3522         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3523
3524         /* send 10 Ethernet packets */
3525         for (i = 0; i < 10; i++)
3526                 bnx2x_lb_pckt(bp);
3527
3528         /* Wait until NIG register shows 10 + 1 packets
3529            totalling 11*0x10 = 0xb0 octets */
3530         count = 1000 * factor;
3531         while (count) {
3532 #ifdef BNX2X_DMAE_RD
3533                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3534                 val = *bnx2x_sp(bp, wb_data[0]);
3535 #else
3536                 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3537                 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3538 #endif
3539                 if (val == 0xb0)
3540                         break;
3541
3542                 msleep(10);
3543                 count--;
3544         }
3545         if (val != 0xb0) {
3546                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3547                 return -3;
3548         }
3549
3550         /* Wait until PRS register shows 2 packets */
3551         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3552         if (val != 2)
3553                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3554
3555         /* Write 1 to parser credits for CFC search request */
3556         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3557
3558         /* Wait until PRS register shows 3 packets */
3559         msleep(10 * factor);
3560         /* read how many packets the PRS has seen */
3561         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3562         if (val != 3)
3563                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3564
3565         /* clear NIG EOP FIFO */
3566         for (i = 0; i < 11; i++)
3567                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3568         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3569         if (val != 1) {
3570                 BNX2X_ERR("clear of NIG failed\n");
3571                 return -4;
3572         }
3573
3574         /* Reset and init BRB, PRS, NIG */
3575         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3576         msleep(50);
3577         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3578         msleep(50);
3579         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3580         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3581 #ifndef BCM_ISCSI
3582         /* set NIC mode */
3583         REG_WR(bp, PRS_REG_NIC_MODE, 1);
3584 #endif
3585
3586         /* Enable inputs of parser neighbor blocks */
3587         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3588         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3589         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3590         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
3591
3592         DP(NETIF_MSG_HW, "done\n");
3593
3594         return 0; /* OK */
3595 }
3596
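/* Unmask (write 0 to) the interrupt masks of the blocks so that their
 * attentions reach the AEU; PXP2 (0x480000) and PBF (0x18) keep a few
 * bits masked.
 */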
3597 static void enable_blocks_attention(struct bnx2x *bp)
3598 {
3599         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3600         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3601         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3602         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3603         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3604         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3605         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3606         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3607         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3608 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3609 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3610         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3611         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3612         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3613 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3614 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3615         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3616         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3617         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3618         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3619 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3620 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3621         REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
3622         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3623         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3624         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3625 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3626 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3627         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3628         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3629 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3630         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
3631 }
3632
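/* Chip and port initialization.  Bit 0 of mode requests the common
 * (whole-chip) phase, which runs once; the per-port phase below always
 * runs for the function (port) this bp is bound to.
 */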
3633 static int bnx2x_function_init(struct bnx2x *bp, int mode)
3634 {
3635         int func = bp->port;
3636         int port = func ? PORT1 : PORT0;
3637         u32 val, i;
3638 #ifdef USE_DMAE
3639         u32 wb_write[2];
3640 #endif
3641
3642         DP(BNX2X_MSG_MCP, "function is %d  mode is %x\n", func, mode);
3643         if ((func != 0) && (func != 1)) {
3644                 BNX2X_ERR("BAD function number (%d)\n", func);
3645                 return -ENODEV;
3646         }
3647
3648         bnx2x_gunzip_init(bp);
3649
3650         if (mode & 0x1) {       /* init common */
3651                 DP(BNX2X_MSG_MCP, "starting common init  func %d  mode %x\n",
3652                    func, mode);
3653                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
3654                        0xffffffff);
3655                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
3656                        0xfffc);
3657                 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
3658
3659                 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3660                 msleep(30);
3661                 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3662
3663                 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
3664                 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
3665
3666                 bnx2x_init_pxp(bp);
3667
3668                 if (CHIP_REV(bp) == CHIP_REV_Ax) {
3669                         /* enable HW interrupt from PXP on USDM
3670                            overflow bit 16 on INT_MASK_0 */
3671                         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3672                 }
3673
3674 #ifdef __BIG_ENDIAN
3675                 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3676                 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3677                 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3678                 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3679                 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3680                 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
3681
3682 /*              REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3683                 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3684                 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3685                 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3686                 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3687 #endif
3688
3689 #ifndef BCM_ISCSI
3690                 /* set NIC mode */
3691                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3692 #endif
3693
3694                 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
3695 #ifdef BCM_ISCSI
3696                 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3697                 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3698                 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3699 #endif
3700
3701                 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
3702
3703                 /* let the HW do its magic ... */
3704                 msleep(100);
3705                 /* finish PXP init
3706                    (can be moved up if we want to use the DMAE) */
3707                 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3708                 if (val != 1) {
3709                         BNX2X_ERR("PXP2 CFG failed\n");
3710                         return -EBUSY;
3711                 }
3712
3713                 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3714                 if (val != 1) {
3715                         BNX2X_ERR("PXP2 RD_INIT failed\n");
3716                         return -EBUSY;
3717                 }
3718
3719                 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3720                 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3721
3722                 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3723
3724                 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
3725                 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
3726                 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
3727                 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
3728
3729 #ifdef BNX2X_DMAE_RD
3730                 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3731                 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3732                 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3733                 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3734 #else
3735                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
3736                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
3737                 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
3738                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
3739                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
3740                 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
3741                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
3742                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
3743                 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
3744                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
3745                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
3746                 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
3747 #endif
3748                 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
3749                 /* soft reset pulse */
3750                 REG_WR(bp, QM_REG_SOFT_RESET, 1);
3751                 REG_WR(bp, QM_REG_SOFT_RESET, 0);
3752
3753 #ifdef BCM_ISCSI
3754                 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
3755 #endif
3756                 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
3757                 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
3758                 if (CHIP_REV(bp) == CHIP_REV_Ax) {
3759                         /* enable hw interrupt from doorbell Q */
3760                         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3761                 }
3762
3763                 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3764
3765                 if (CHIP_REV_IS_SLOW(bp)) {
3766                         /* fix for emulation and FPGA for no pause */
3767                         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
3768                         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
3769                         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
3770                         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
3771                 }
3772
3773                 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3774
3775                 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
3776                 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
3777                 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
3778                 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
3779
3780                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
3781                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
3782                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
3783                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
3784
3785                 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
3786                 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
3787                 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
3788                 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
3789
3790                 /* sync semi rtc */
3791                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3792                        0x80000000);
3793                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
3794                        0x80000000);
3795
3796                 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
3797                 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
3798                 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
3799
3800                 REG_WR(bp, SRC_REG_SOFT_RST, 1);
3801                 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
3802                         REG_WR(bp, i, 0xc0cac01a);
3803                         /* TODO: replace with something meaningful */
3804                 }
3805                 /* SRCH COMMON comes here */
3806                 REG_WR(bp, SRC_REG_SOFT_RST, 0);
3807
3808                 if (sizeof(union cdu_context) != 1024) {
3809                         /* we currently assume that a context is 1024 bytes */
3810                         printk(KERN_ALERT PFX "please adjust the size of"
3811                                " cdu_context(%ld)\n",
3812                                (long)sizeof(union cdu_context));
3813                 }
3814                 val = (4 << 24) + (0 << 12) + 1024;
3815                 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
3816                 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
3817
3818                 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
3819                 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
3820
3821                 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
3822                 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
3823                                  MISC_AEU_COMMON_END);
3824                 /* RXPCS COMMON comes here */
3825                 /* EMAC0 COMMON comes here */
3826                 /* EMAC1 COMMON comes here */
3827                 /* DBU COMMON comes here */
3828                 /* DBG COMMON comes here */
3829                 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
3830
3831                 if (CHIP_REV_IS_SLOW(bp))
3832                         msleep(200);
3833
3834                 /* finish CFC init */
3835                 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
3836                 if (val != 1) {
3837                         BNX2X_ERR("CFC LL_INIT failed\n");
3838                         return -EBUSY;
3839                 }
3840
3841                 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
3842                 if (val != 1) {
3843                         BNX2X_ERR("CFC AC_INIT failed\n");
3844                         return -EBUSY;
3845                 }
3846
3847                 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
3848                 if (val != 1) {
3849                         BNX2X_ERR("CFC CAM_INIT failed\n");
3850                         return -EBUSY;
3851                 }
3852
3853                 REG_WR(bp, CFC_REG_DEBUG0, 0);
3854
3855                 /* read NIG statistic
3856                    to see if this is our first up since powerup */
3857 #ifdef BNX2X_DMAE_RD
3858                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3859                 val = *bnx2x_sp(bp, wb_data[0]);
3860 #else
3861                 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3862                 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3863 #endif
3864                 /* do internal memory self test */
3865                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
3866                         BNX2X_ERR("internal mem selftest failed\n");
3867                         return -EBUSY;
3868                 }
3869
3870                 /* clear PXP2 attentions */
3871                 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
3872
3873                 enable_blocks_attention(bp);
3874                 /* enable_blocks_parity(bp); */
3875
3876                 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
3877                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
3878                         /* Fan failure is indicated by SPIO 5 */
3879                         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3880                                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
3881
3882                         /* set to active low mode */
3883                         val = REG_RD(bp, MISC_REG_SPIO_INT);
3884                         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3885                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3886                         REG_WR(bp, MISC_REG_SPIO_INT, val);
3887
3888                         /* enable interrupt to signal the IGU */
3889                         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3890                         val |= (1 << MISC_REGISTERS_SPIO_5);
3891                         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3892                         break;
3893
3894                 default:
3895                         break;
3896                 }
3897
3898         } /* end of common init */
3899
3900         /* per port init */
3901
3902         /* the physical address is shifted right by 12 bits and a
3903            1 (valid) bit is added at bit 52 of the result;
3904            then, since this is a wide register(TM),
3905            we split it into two 32 bit writes
3906          */
3907 #define RQ_ONCHIP_AT_PORT_SIZE  384
3908 #define ONCHIP_ADDR1(x)   ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
3909 #define ONCHIP_ADDR2(x)   ((u32)((1 << 20) | ((u64)x >> 44)))
3910 #define PXP_ONE_ILT(x)    ((x << 10) | x)
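/* Illustrative example: for x = 0x00123456789ab000,
 * ONCHIP_ADDR1(x) = (x >> 12) & 0xffffffff = 0x456789ab and
 * ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44) = 0x100123,
 * the 1 << 20 being the valid bit.
 */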
3911
3912         DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
3913
3914         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
3915
3916         /* Port PXP comes here */
3917         /* Port PXP2 comes here */
3918
3919         /* Offset is
3920          * Port0  0
3921          * Port1  384 */
3922         i = func * RQ_ONCHIP_AT_PORT_SIZE;
3923 #ifdef USE_DMAE
3924         wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
3925         wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
3926         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3927 #else
3928         REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
3929                    ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
3930         REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
3931                    ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
3932 #endif
3933         REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
3934
3935 #ifdef BCM_ISCSI
3936         /* Port0  1
3937          * Port1  385 */
3938         i++;
3939         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
3940         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
3941         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3942         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
3943
3944         /* Port0  2
3945          * Port1  386 */
3946         i++;
3947         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
3948         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
3949         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3950         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
3951
3952         /* Port0  3
3953          * Port1  387 */
3954         i++;
3955         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
3956         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
3957         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3958         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
3959 #endif
3960
3961         /* Port TCM comes here */
3962         /* Port UCM comes here */
3963         /* Port CCM comes here */
3964         bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
3965                              func ? XCM_PORT1_END : XCM_PORT0_END);
3966
3967 #ifdef USE_DMAE
3968         wb_write[0] = 0;
3969         wb_write[1] = 0;
3970 #endif
3971         for (i = 0; i < 32; i++) {
3972                 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
3973 #ifdef USE_DMAE
3974                 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
3975 #else
3976                 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
3977                 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
3978 #endif
3979         }
3980         REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
3981
3982         /* Port QM comes here */
3983
3984 #ifdef BCM_ISCSI
3985         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
3986         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
3987
3988         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
3989                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
3990 #endif
3991         /* Port DQ comes here */
3992         /* Port BRB1 comes here */
3993         bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
3994                              func ? PRS_PORT1_END : PRS_PORT0_END);
3995         /* Port TSDM comes here */
3996         /* Port CSDM comes here */
3997         /* Port USDM comes here */
3998         /* Port XSDM comes here */
3999         bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
4000                              func ? TSEM_PORT1_END : TSEM_PORT0_END);
4001         bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
4002                              func ? USEM_PORT1_END : USEM_PORT0_END);
4003         bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
4004                              func ? CSEM_PORT1_END : CSEM_PORT0_END);
4005         bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
4006                              func ? XSEM_PORT1_END : XSEM_PORT0_END);
4007         /* Port UPB comes here */
4008         /* Port XSDM comes here */
4009         bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
4010                              func ? PBF_PORT1_END : PBF_PORT0_END);
4011
4012         /* configure PBF to work without PAUSE, MTU 9000 */
4013         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
4014
4015         /* update threshold */
4016         REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
4017         /* update init credit */
4018         REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
4019
4020         /* probe changes */
4021         REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
4022         msleep(5);
4023         REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
4024
4025 #ifdef BCM_ISCSI
4026         /* tell the searcher where the T2 table is */
4027         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
4028
4029         wb_write[0] = U64_LO(bp->t2_mapping);
4030         wb_write[1] = U64_HI(bp->t2_mapping);
4031         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
4032         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
4033         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
4034         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
4035
4036         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
4037         /* Port SRCH comes here */
4038 #endif
4039         /* Port CDU comes here */
4040         /* Port CFC comes here */
4041         bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
4042                              func ? HC_PORT1_END : HC_PORT0_END);
4043         bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
4044                                     MISC_AEU_PORT0_START,
4045                              func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
4046         /* Port PXPCS comes here */
4047         /* Port EMAC0 comes here */
4048         /* Port EMAC1 comes here */
4049         /* Port DBU comes here */
4050         /* Port DBG comes here */
4051         bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
4052                              func ? NIG_PORT1_END : NIG_PORT0_END);
4053         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
4054         /* Port MCP comes here */
4055         /* Port DMAE comes here */
4056
4057         switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4058         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4059                 /* add SPIO 5 to group 0 */
4060                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4061                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4062                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
4063                 break;
4064
4065         default:
4066                 break;
4067         }
4068
4069         bnx2x__link_reset(bp);
4070
4071         /* Reset PCIE errors for debug */
4072         REG_WR(bp, 0x2114, 0xffffffff);
4073         REG_WR(bp, 0x2120, 0xffffffff);
4074         REG_WR(bp, 0x2814, 0xffffffff);
4075
4076         /* !!! move to init_values.h */
4077         REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4078         REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4079         REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4080         REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4081
4082         REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
4083         REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
4084         REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
4085         REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
4086
4087         bnx2x_gunzip_end(bp);
4088
4089         if (!nomcp) {
4090                 port = bp->port;
4091
4092                 bp->fw_drv_pulse_wr_seq =
4093                                 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
4094                                  DRV_PULSE_SEQ_MASK);
4095                 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
4096                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  fw_mb 0x%x\n",
4097                    bp->fw_drv_pulse_wr_seq, bp->fw_mb);
4098         } else {
4099                 bp->fw_mb = 0;
4100         }
4101
4102         return 0;
4103 }
4104
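/* MCP mailbox handshake: the driver writes (command | seq) to its
 * mailbox and then polls the firmware mailbox header; a reply is
 * matched to its request by comparing sequence numbers.
 */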
4105 /* send the MCP a request, block until there is a reply */
4106 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
4107 {
4108         int port = bp->port;
4109         u32 seq = ++bp->fw_seq;
4110         u32 rc = 0;
4111
4112         SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
4113         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
4114
4115         /* let the FW do its magic ... */
4116         msleep(100); /* TBD */
4117
4118         if (CHIP_REV_IS_SLOW(bp))
4119                 msleep(900);
4120
4121         rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
4122         DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
4123
4124         /* is this a reply to our command? */
4125         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
4126                 rc &= FW_MSG_CODE_MASK;
4127
4128         } else {
4129                 /* FW BUG! */
4130                 BNX2X_ERR("FW failed to respond!\n");
4131                 bnx2x_fw_dump(bp);
4132                 rc = 0;
4133         }
4134
4135         return rc;
4136 }
4137
4138 static void bnx2x_free_mem(struct bnx2x *bp)
4139 {
4140
4141 #define BNX2X_PCI_FREE(x, y, size) \
4142         do { \
4143                 if (x) { \
4144                         pci_free_consistent(bp->pdev, size, x, y); \
4145                         x = NULL; \
4146                         y = 0; \
4147                 } \
4148         } while (0)
4149
4150 #define BNX2X_FREE(x) \
4151         do { \
4152                 if (x) { \
4153                         vfree(x); \
4154                         x = NULL; \
4155                 } \
4156         } while (0)
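/* Both macros check for NULL and clear the pointer after freeing, so
 * bnx2x_free_mem() is safe to call on a partially allocated bp (it is
 * reused as the unwind path of bnx2x_alloc_mem()).
 */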
4157
4158         int i;
4159
4160         /* fastpath */
4161         for_each_queue(bp, i) {
4162
4163                 /* Status blocks */
4164                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4165                                bnx2x_fp(bp, i, status_blk_mapping),
4166                                sizeof(struct host_status_block) +
4167                                sizeof(struct eth_tx_db_data));
4168
4169                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
4170                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4171                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4172                                bnx2x_fp(bp, i, tx_desc_mapping),
4173                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
4174
4175                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4176                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4177                                bnx2x_fp(bp, i, rx_desc_mapping),
4178                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4179
4180                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4181                                bnx2x_fp(bp, i, rx_comp_mapping),
4182                                sizeof(struct eth_fast_path_rx_cqe) *
4183                                NUM_RCQ_BD);
4184         }
4185
4186         BNX2X_FREE(bp->fp);
4187
4188         /* end of fastpath */
4189
4190         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4191                        (sizeof(struct host_def_status_block)));
4192
4193         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4194                        (sizeof(struct bnx2x_slowpath)));
4195
4196 #ifdef BCM_ISCSI
4197         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4198         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4199         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4200         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4201 #endif
4202         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4203
4204 #undef BNX2X_PCI_FREE
4205 #undef BNX2X_FREE
4206 }
4207
4208 static int bnx2x_alloc_mem(struct bnx2x *bp)
4209 {
4210
4211 #define BNX2X_PCI_ALLOC(x, y, size) \
4212         do { \
4213                 x = pci_alloc_consistent(bp->pdev, size, y); \
4214                 if (x == NULL) \
4215                         goto alloc_mem_err; \
4216                 memset(x, 0, size); \
4217         } while (0)
4218
4219 #define BNX2X_ALLOC(x, size) \
4220         do { \
4221                 x = vmalloc(size); \
4222                 if (x == NULL) \
4223                         goto alloc_mem_err; \
4224                 memset(x, 0, size); \
4225         } while (0)
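/* Any allocation failure jumps to alloc_mem_err, which unwinds every
 * allocation made so far via bnx2x_free_mem().
 */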
4226
4227         int i;
4228
4229         /* fastpath */
4230         BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
4231
4232         for_each_queue(bp, i) {
4233                 bnx2x_fp(bp, i, bp) = bp;
4234
4235                 /* Status blocks */
4236                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4237                                 &bnx2x_fp(bp, i, status_blk_mapping),
4238                                 sizeof(struct host_status_block) +
4239                                 sizeof(struct eth_tx_db_data));
4240
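                /* The Tx doorbell data shares the status block DMA
                 * allocation: it sits immediately after the
                 * host_status_block, in both the virtual and the bus
                 * address space.
                 */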
4241                 bnx2x_fp(bp, i, hw_tx_prods) =
4242                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
4243
4244                 bnx2x_fp(bp, i, tx_prods_mapping) =
4245                                 bnx2x_fp(bp, i, status_blk_mapping) +
4246                                 sizeof(struct host_status_block);
4247
4248                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
4249                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4250                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4251                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4252                                 &bnx2x_fp(bp, i, tx_desc_mapping),
4253                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
4254
4255                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4256                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4257                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4258                                 &bnx2x_fp(bp, i, rx_desc_mapping),
4259                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4260
4261                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4262                                 &bnx2x_fp(bp, i, rx_comp_mapping),
4263                                 sizeof(struct eth_fast_path_rx_cqe) *
4264                                 NUM_RCQ_BD);
4265
4266         }
4267         /* end of fastpath */
4268
4269         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4270                         sizeof(struct host_def_status_block));
4271
4272         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4273                         sizeof(struct bnx2x_slowpath));
4274
4275 #ifdef BCM_ISCSI
4276         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4277
4278         /* Initialize T1 */
4279         for (i = 0; i < 64*1024; i += 64) {
4280                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
4281                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
4282         }
4283
4284         /* allocate the searcher T2 table:
4285            we allocate 1/4 of the allocation size for T2
4286            (it is not entered into the ILT) */
4287         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4288
4289         /* Initialize T2 */
4290         for (i = 0; i < 16*1024; i += 64)
4291                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4292
4293         /* now fixup the last line in the block to point to the next block */
4294         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
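        /* The last entry now points back to the start of the table,
         * making the searcher free list circular.
         */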
4295
4296         /* Timer block array (MAX_CONN*8), physically uncached; for now 1024 conns */
4297         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4298
4299         /* QM queues (128*MAX_CONN) */
4300         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4301 #endif
4302
4303         /* Slow path ring */
4304         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4305
4306         return 0;
4307
4308 alloc_mem_err:
4309         bnx2x_free_mem(bp);
4310         return -ENOMEM;
4311
4312 #undef BNX2X_PCI_ALLOC
4313 #undef BNX2X_ALLOC
4314 }
4315
4316 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
4317 {
4318         int i;
4319
4320         for_each_queue(bp, i) {
4321                 struct bnx2x_fastpath *fp = &bp->fp[i];
4322
4323                 u16 bd_cons = fp->tx_bd_cons;
4324                 u16 sw_prod = fp->tx_pkt_prod;
4325                 u16 sw_cons = fp->tx_pkt_cons;
4326
4327                 BUG_TRAP(fp->tx_buf_ring != NULL);
4328
4329                 while (sw_cons != sw_prod) {
4330                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
4331                         sw_cons++;
4332                 }
4333         }
4334 }
4335
4336 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
4337 {
4338         int i, j;
4339
4340         for_each_queue(bp, j) {
4341                 struct bnx2x_fastpath *fp = &bp->fp[j];
4342
4343                 BUG_TRAP(fp->rx_buf_ring != NULL);
4344
4345                 for (i = 0; i < NUM_RX_BD; i++) {
4346                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
4347                         struct sk_buff *skb = rx_buf->skb;
4348
4349                         if (skb == NULL)
4350                                 continue;
4351
4352                         pci_unmap_single(bp->pdev,
4353                                          pci_unmap_addr(rx_buf, mapping),
4354                                          bp->rx_buf_use_size,
4355                                          PCI_DMA_FROMDEVICE);
4356
4357                         rx_buf->skb = NULL;
4358                         dev_kfree_skb(skb);
4359                 }
4360         }
4361 }
4362
4363 static void bnx2x_free_skbs(struct bnx2x *bp)
4364 {
4365         bnx2x_free_tx_skbs(bp);
4366         bnx2x_free_rx_skbs(bp);
4367 }
4368
4369 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
4370 {
4371         int i;
4372
4373         free_irq(bp->msix_table[0].vector, bp->dev);
4374         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
4375            bp->msix_table[0].vector);
4376
4377         for_each_queue(bp, i) {
4378                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
4379                    "state(%x)\n", i, bp->msix_table[i + 1].vector,
4380                    bnx2x_fp(bp, i, state));
4381
4382                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
4383                         BNX2X_ERR("IRQ of fp #%d being freed while "
4384                                   "state != closed\n", i);
4385
4386                 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
4387         }
4388
4389 }
4390
4391 static void bnx2x_free_irq(struct bnx2x *bp)
4392 {
4393
4394         if (bp->flags & USING_MSIX_FLAG) {
4395
4396                 bnx2x_free_msix_irqs(bp);
4397                 pci_disable_msix(bp->pdev);
4398
4399                 bp->flags &= ~USING_MSIX_FLAG;
4400
4401         } else
4402                 free_irq(bp->pdev->irq, bp->dev);
4403 }
4404
4405 static int bnx2x_enable_msix(struct bnx2x *bp)
4406 {
4407
4408         int i;
4409
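        /* Vector 0 is reserved for the slowpath (default status
         * block); vectors 1..num_queues map 1:1 onto the fastpath
         * queues.
         */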
4410         bp->msix_table[0].entry = 0;
4411         for_each_queue(bp, i)
4412                 bp->msix_table[i + 1].entry = i + 1;
4413
4414         if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
4415                                      bp->num_queues + 1)) {
4416                 BNX2X_LOG("failed to enable MSI-X\n");
4417                 return -1;
4418
4419         }
4420
4421         bp->flags |= USING_MSIX_FLAG;
4422
4423         return 0;
4424
4425 }
4426
4427
4428 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
4429 {
4430
4431         int i, rc;
4432
4433         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
4434                          bp->dev->name, bp->dev);
4435
4436         if (rc) {
4437                 BNX2X_ERR("request sp irq failed\n");
4438                 return -EBUSY;
4439         }
4440
4441         for_each_queue(bp, i) {
4442                 rc = request_irq(bp->msix_table[i + 1].vector,
4443                                  bnx2x_msix_fp_int, 0,
4444                                  bp->dev->name, &bp->fp[i]);
4445
4446                 if (rc) {
4447                         BNX2X_ERR("request fp #%d irq failed  "
4448                                   "rc %d\n", i, rc);
4449                         bnx2x_free_msix_irqs(bp);
4450                         return -EBUSY;
4451                 }
4452
4453                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
4454
4455         }
4456
4457         return 0;
4458
4459 }
4460
4461 static int bnx2x_req_irq(struct bnx2x *bp)
4462 {
4463
4464         int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
4465                              IRQF_SHARED, bp->dev->name, bp->dev);
4466         if (!rc)
4467                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
4468
4469         return rc;
4470
4471 }
4472
4473 /*
4474  * Init service functions
4475  */
4476
4477 static void bnx2x_set_mac_addr(struct bnx2x *bp)
4478 {
4479         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4480
4481         /* CAM allocation
4482          * unicasts 0-31:port0 32-63:port1
4483          * multicast 64-127:port0 128-191:port1
4484          */
4485         config->hdr.length_6b = 2;
4486         config->hdr.offset = bp->port ? 32 : 0;
4487         config->hdr.reserved0 = 0;
4488         config->hdr.reserved1 = 0;
4489
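        /* The CAM presumably stores the MAC address as three
         * big-endian 16-bit words, hence the swab16() on each pair of
         * address bytes below.
         */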
4490         /* primary MAC */
4491         config->config_table[0].cam_entry.msb_mac_addr =
4492                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
4493         config->config_table[0].cam_entry.middle_mac_addr =
4494                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
4495         config->config_table[0].cam_entry.lsb_mac_addr =
4496                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
4497         config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
4498         config->config_table[0].target_table_entry.flags = 0;
4499         config->config_table[0].target_table_entry.client_id = 0;
4500         config->config_table[0].target_table_entry.vlan_id = 0;
4501
4502         DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
4503            config->config_table[0].cam_entry.msb_mac_addr,
4504            config->config_table[0].cam_entry.middle_mac_addr,
4505            config->config_table[0].cam_entry.lsb_mac_addr);
4506
4507         /* broadcast */
4508         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
4509         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
4510         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
4511         config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
4512         config->config_table[1].target_table_entry.flags =
4513                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4514         config->config_table[1].target_table_entry.client_id = 0;
4515         config->config_table[1].target_table_entry.vlan_id = 0;
4516
4517         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4518                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4519                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4520 }
4521
4522 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4523                              int *state_p, int poll)
4524 {
4525         /* can take a while if any port is running */
4526         int timeout = 500;
4527
4528         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4529            poll ? "polling" : "waiting", state, idx);
4530
4531         might_sleep();
4532
4533         while (timeout) {
4534
4535                 if (poll) {
4536                         bnx2x_rx_int(bp->fp, 10);
4537                         /* If the index is different from 0,
4538                          * the reply for some commands will
4539                          * be on a non-default queue
4540                          */
4541                         if (idx)
4542                                 bnx2x_rx_int(&bp->fp[idx], 10);
4543                 }
4544
4545                 mb(); /* state is changed by bnx2x_sp_event()*/
4546
4547                 if (*state_p == state)
4548                         return 0;
4549
4550                 timeout--;
4551                 msleep(1);
4552
4553         }
4554
4555         /* timeout! */
4556         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4557                   poll ? "polling" : "waiting", state, idx);
4558
4559         return -EBUSY;
4560 }
4561
4562 static int bnx2x_setup_leading(struct bnx2x *bp)
4563 {
4564
4565         /* reset IGU state */
4566         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4567
4568         /* SETUP ramrod */
4569         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4570
4571         return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4572
4573 }
4574
4575 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
4576 {
4577
4578         /* reset IGU state */
4579         bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4580
4581         /* SETUP ramrod */
4582         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
4583         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
4584
4585         /* Wait for completion */
4586         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4587                                  &(bp->fp[index].state), 0);
4588
4589 }
4590
4591
4592 static int bnx2x_poll(struct napi_struct *napi, int budget);
4593 static void bnx2x_set_rx_mode(struct net_device *dev);
4594
4595 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
4596 {
4597         u32 load_code;
4598         int i;
4599
4600         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
4601
4602         /* Send LOAD_REQUEST command to MCP.
4603            Returns the type of LOAD command: if it is the
4604            first port to be initialized, common blocks should be
4605            initialized, otherwise not.
4606         */
4607         if (!nomcp) {
4608                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
4609                 if (!load_code) {
4610                         BNX2X_ERR("MCP response failure, unloading\n");
4611                         return -EBUSY;
4612                 }
4613                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
4614                         BNX2X_ERR("MCP refused load request, unloading\n");
4615                         return -EBUSY; /* other port in diagnostic mode */
4616                 }
4617         } else {
4618                 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
4619         }
4620
4621         /* if we can't use MSI-X we only need one fp,
4622          * so try to enable MSI-X with the requested number of fp's
4623          * and fall back to INT#A with one fp
4624          */
4625         if (req_irq) {
4626                 if (use_inta) {
4627                         bp->num_queues = 1;
4628                 } else {
4629                         if ((use_multi > 1) && (use_multi <= 16))
4630                                 /* user requested number */
4631                                 bp->num_queues = use_multi;
4632                         else if (use_multi == 1)
4633                                 bp->num_queues = num_online_cpus();
4634                         else
4635                                 bp->num_queues = 1;
4636
4637                         if (bnx2x_enable_msix(bp)) {
4638                                 /* failed to enable msix */
4639                                 bp->num_queues = 1;
4640                                 if (use_multi)
4641                                         BNX2X_ERR("Multi requested but failed"
4642                                                   " to enable MSI-X\n");
4643                         }
4644                 }
4645         }
4646
4647         DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
4648
4649         if (bnx2x_alloc_mem(bp))
4650                 return -ENOMEM;
4651
4652         if (req_irq) {
4653                 if (bp->flags & USING_MSIX_FLAG) {
4654                         if (bnx2x_req_msix_irqs(bp)) {
4655                                 pci_disable_msix(bp->pdev);
4656                                 goto load_error;
4657                         }
4658
4659                 } else {
4660                         if (bnx2x_req_irq(bp)) {
4661                                 BNX2X_ERR("IRQ request failed, aborting\n");
4662                                 goto load_error;
4663                         }
4664                 }
4665         }
4666
4667         for_each_queue(bp, i)
4668                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
4669                                bnx2x_poll, 128);
4670
4671
4672         /* Initialize HW */
4673         if (bnx2x_function_init(bp,
4674                                 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
4675                 BNX2X_ERR("HW init failed, aborting\n");
4676                 goto load_error;
4677         }
4678
4679
4680         atomic_set(&bp->intr_sem, 0);
4681
4682
4683         /* Setup NIC internals and enable interrupts */
4684         bnx2x_nic_init(bp);
4685
4686         /* Send LOAD_DONE command to MCP */
4687         if (!nomcp) {
4688                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
4689                 if (!load_code) {
4690                         BNX2X_ERR("MCP response failure, unloading\n");
4691                         goto load_int_disable;
4692                 }
4693         }
4694
4695         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
4696
4697         /* Enable Rx interrupt handling before sending the ramrod
4698            as its completion arrives on the Rx FP queue */
4699         for_each_queue(bp, i)
4700                 napi_enable(&bnx2x_fp(bp, i, napi));
4701
4702         if (bnx2x_setup_leading(bp))
4703                 goto load_stop_netif;
4704
4705         for_each_nondefault_queue(bp, i)
4706                 if (bnx2x_setup_multi(bp, i))
4707                         goto load_stop_netif;
4708
4709         bnx2x_set_mac_addr(bp);
4710
4711         bnx2x_initial_phy_init(bp);
4712
4713         /* Start fast path */
4714         if (req_irq) { /* IRQ is only requested from bnx2x_open */
4715                 netif_start_queue(bp->dev);
4716                 if (bp->flags & USING_MSIX_FLAG)
4717                         printk(KERN_INFO PFX "%s: using MSI-X\n",
4718                                bp->dev->name);
4719
4720         /* Otherwise the Tx queue only needs to be re-enabled */
4721         } else if (netif_running(bp->dev)) {
4722                 netif_wake_queue(bp->dev);
4723                 bnx2x_set_rx_mode(bp->dev);
4724         }
4725
4726         /* start the timer */
4727         mod_timer(&bp->timer, jiffies + bp->current_interval);
4728
4729         return 0;
4730
4731 load_stop_netif:
4732         for_each_queue(bp, i)
4733                 napi_disable(&bnx2x_fp(bp, i, napi));
4734
4735 load_int_disable:
4736         bnx2x_int_disable_sync(bp);
4737
4738         bnx2x_free_skbs(bp);
4739         bnx2x_free_irq(bp);
4740
4741 load_error:
4742         bnx2x_free_mem(bp);
4743
4744         /* TBD we really need to reset the chip
4745            if we want to recover from this */
4746         return -EBUSY;
4747 }
4748
4749
4750 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
4751 {
4752         int port = bp->port;
4753 #ifdef USE_DMAE
4754         u32 wb_write[2];
4755 #endif
4756         int base, i;
4757
4758         DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
4759
4760         /* Do not rcv packets to BRB */
4761         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
4762         /* Do not direct rcv packets that are not for MCP to the BRB */
4763         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
4764                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
4765
4766         /* Configure IGU and AEU */
4767         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
4768         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
4769
4770         /* TODO: Close Doorbell port? */
4771
4772         /* Clear ILT */
4773 #ifdef USE_DMAE
4774         wb_write[0] = 0;
4775         wb_write[1] = 0;
4776 #endif
4777         base = port * RQ_ONCHIP_AT_PORT_SIZE;
4778         for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
4779 #ifdef USE_DMAE
4780                 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
4781 #else
4782                 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
4783                 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
4784 #endif
4785         }
4786
4787         if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
4788                 /* reset_common */
4789                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4790                        0xd3ffff7f);
4791                 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
4792                        0x1403);
4793         }
4794 }
4795
4796 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
4797 {
4798
4799         int rc;
4800
4801         /* halt the connection */
4802         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
4803         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
4804
4805
4806         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
4807                                        &(bp->fp[index].state), 1);
4808         if (rc) /* timeout */
4809                 return rc;
4810
4811         /* delete cfc entry */
4812         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
4813
4814         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
4815                                  &(bp->fp[index].state), 1);
4816
4817 }
4818
4819
4820 static void bnx2x_stop_leading(struct bnx2x *bp)
4821 {
4822         u16 dsb_sp_prod_idx;
4823         /* if the other port is handling traffic,
4824            this can take a lot of time */
4825         int timeout = 500;
4826
4827         might_sleep();
4828
4829         /* Send HALT ramrod */
4830         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
4831         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
4832
4833         if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
4834                                &(bp->fp[0].state), 1))
4835                 return;
4836
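        /* Snapshot the default status block slowpath producer; the
         * PORT_DEL completion is expected to advance it.
         */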
4837         dsb_sp_prod_idx = *bp->dsb_sp_prod;
4838
4839         /* Send PORT_DELETE ramrod */
4840         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
4841
4842         /* Wait for the completion to arrive on the default status block;
4843            we are going to reset the chip anyway,
4844            so there is not much to do if this times out
4845          */
4846         while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
4847                 timeout--;
4848                 msleep(1);
4849         }
4850         if (!timeout) {
4851                 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
4852                    "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
4853                    *bp->dsb_sp_prod, dsb_sp_prod_idx);
4854         }
4855         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
4856         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
4857 }
4858
4859
4860 static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
4861 {
4862         u32 reset_code = 0;
4863         int i, timeout;
4864
4865         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
4866
4867         del_timer_sync(&bp->timer);
4868
4869         bp->rx_mode = BNX2X_RX_MODE_NONE;
4870         bnx2x_set_storm_rx_mode(bp);
4871
4872         if (netif_running(bp->dev)) {
4873                 netif_tx_disable(bp->dev);
4874                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
4875         }
4876
4877         /* Wait until all fast path tasks complete */
4878         for_each_queue(bp, i) {
4879                 struct bnx2x_fastpath *fp = &bp->fp[i];
4880
4881                 timeout = 1000;
4882                 while (bnx2x_has_work(fp) && (timeout--))
4883                         msleep(1);
4884                 if (!timeout)
4885                         BNX2X_ERR("timeout waiting for queue[%d]\n", i);
4886         }
4887
4888         /* Wait until stat ramrod returns and all SP tasks complete */
4889         timeout = 1000;
4890         while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
4891                (timeout--))
4892                 msleep(1);
4893
4894         for_each_queue(bp, i)
4895                 napi_disable(&bnx2x_fp(bp, i, napi));
4896         /* Disable interrupts after Tx and Rx are disabled on stack level */
4897         bnx2x_int_disable_sync(bp);
4898
4899         if (bp->flags & NO_WOL_FLAG)
4900                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
4901
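        /* WOL via magic packet: put the EMAC into magic/ACPI packet
         * receive mode and program the MAC match registers so the NIC
         * recognizes its own address while the host is down.
         */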
4902         else if (bp->wol) {
4903                 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
4904                 u8 *mac_addr = bp->dev->dev_addr;
4905                 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
4906                            EMAC_MODE_ACPI_RCVD);
4907
4908                 EMAC_WR(EMAC_REG_EMAC_MODE, val);
4909
4910                 val = (mac_addr[0] << 8) | mac_addr[1];
4911                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
4912
4913                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4914                       (mac_addr[4] << 8) | mac_addr[5];
4915                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
4916
4917                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
4918
4919         } else
4920                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
4921
4922         /* Close multi and leading connections */
4923         for_each_nondefault_queue(bp, i)
4924                 if (bnx2x_stop_multi(bp, i))
4925                         goto unload_error;
4926
4927         bnx2x_stop_leading(bp);
4928         if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
4929             (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
4930                 DP(NETIF_MSG_IFDOWN, "failed to close leading properly!  "
4931                    "state 0x%x  fp[0].state 0x%x\n",
4932                    bp->state, bp->fp[0].state);
4933         }
4934
4935 unload_error:
4936         bnx2x__link_reset(bp);
4937
4938         if (!nomcp)
4939                 reset_code = bnx2x_fw_command(bp, reset_code);
4940         else
4941                 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
4942
4943         /* Release IRQs */
4944         if (free_irq)
4945                 bnx2x_free_irq(bp);
4946
4947         /* Reset the chip */
4948         bnx2x_reset_chip(bp, reset_code);
4949
4950         /* Report UNLOAD_DONE to MCP */
4951         if (!nomcp)
4952                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
4953
4954         /* Free SKBs and driver internals */
4955         bnx2x_free_skbs(bp);
4956         bnx2x_free_mem(bp);
4957
4958         bp->state = BNX2X_STATE_CLOSED;
4959
4960         netif_carrier_off(bp->dev);
4961
4962         return 0;
4963 }
4964
4965 /* end of nic load/unload */
4966
4967 /* ethtool_ops */
4968
4969 /*
4970  * Init service functions
4971  */
4972
4973 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
4974 {
4975         int port = bp->port;
4976         u32 ext_phy_type;
4977
4978         switch (switch_cfg) {
4979         case SWITCH_CFG_1G:
4980                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
4981
4982                 ext_phy_type =
4983                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
4984                 switch (ext_phy_type) {
4985                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
4986                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
4987                                        ext_phy_type);
4988
4989                         bp->supported |= (SUPPORTED_10baseT_Half |
4990                                           SUPPORTED_10baseT_Full |
4991                                           SUPPORTED_100baseT_Half |
4992                                           SUPPORTED_100baseT_Full |
4993                                           SUPPORTED_1000baseT_Full |
4994                                           SUPPORTED_2500baseX_Full |
4995                                           SUPPORTED_TP | SUPPORTED_FIBRE |
4996                                           SUPPORTED_Autoneg |
4997                                           SUPPORTED_Pause |
4998                                           SUPPORTED_Asym_Pause);
4999                         break;
5000
5001                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
5002                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
5003                                        ext_phy_type);
5004
5005                         bp->supported |= (SUPPORTED_10baseT_Half |
5006                                           SUPPORTED_10baseT_Full |
5007                                           SUPPORTED_100baseT_Half |
5008                                           SUPPORTED_100baseT_Full |
5009                                           SUPPORTED_1000baseT_Full |
5010                                           SUPPORTED_TP | SUPPORTED_FIBRE |
5011                                           SUPPORTED_Autoneg |
5012                                           SUPPORTED_Pause |
5013                                           SUPPORTED_Asym_Pause);
5014                         break;
5015
5016                 default:
5017                         BNX2X_ERR("NVRAM config error. "
5018                                   "BAD SerDes ext_phy_config 0x%x\n",
5019                                   bp->link_params.ext_phy_config);
5020                         return;
5021                 }
5022
5023                 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
5024                                       port*0x10);
5025                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
5026                 break;
5027
5028         case SWITCH_CFG_10G:
5029                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
5030
5031                 ext_phy_type =
5032                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5033                 switch (ext_phy_type) {
5034                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5035                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
5036                                        ext_phy_type);
5037
5038                         bp->supported |= (SUPPORTED_10baseT_Half |
5039                                           SUPPORTED_10baseT_Full |
5040                                           SUPPORTED_100baseT_Half |
5041                                           SUPPORTED_100baseT_Full |
5042                                           SUPPORTED_1000baseT_Full |
5043                                           SUPPORTED_2500baseX_Full |
5044                                           SUPPORTED_10000baseT_Full |
5045                                           SUPPORTED_TP | SUPPORTED_FIBRE |
5046                                           SUPPORTED_Autoneg |
5047                                           SUPPORTED_Pause |
5048                                           SUPPORTED_Asym_Pause);
5049                         break;
5050
5051                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5052                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
5053                                         ext_phy_type);
5054
5055                         bp->supported |= (SUPPORTED_10000baseT_Full |
5056                                           SUPPORTED_FIBRE |
5057                                           SUPPORTED_Pause |
5058                                           SUPPORTED_Asym_Pause);
5059                         break;
5060
5061                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5062                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
5063                                        ext_phy_type);
5064
5065                         bp->supported |= (SUPPORTED_10000baseT_Full |
5066                                           SUPPORTED_1000baseT_Full |
5067                                           SUPPORTED_Autoneg |
5068                                           SUPPORTED_FIBRE |
5069                                           SUPPORTED_Pause |
5070                                           SUPPORTED_Asym_Pause);
5071                         break;
5072
5073                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5074                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
5075                                        ext_phy_type);
5076
5077                         bp->supported |= (SUPPORTED_10000baseT_Full |
5078                                           SUPPORTED_1000baseT_Full |
5079                                           SUPPORTED_FIBRE |
5080                                           SUPPORTED_Autoneg |
5081                                           SUPPORTED_Pause |
5082                                           SUPPORTED_Asym_Pause);
5083                         break;
5084
5085                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5086                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
5087                                        ext_phy_type);
5088
5089                         bp->supported |= (SUPPORTED_10000baseT_Full |
5090                                           SUPPORTED_2500baseX_Full |
5091                                           SUPPORTED_1000baseT_Full |
5092                                           SUPPORTED_FIBRE |
5093                                           SUPPORTED_Autoneg |
5094                                           SUPPORTED_Pause |
5095                                           SUPPORTED_Asym_Pause);
5096                         break;
5097
5098                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5099                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
5100                                        ext_phy_type);
5101
5102                         bp->supported |= (SUPPORTED_10000baseT_Full |
5103                                           SUPPORTED_TP |
5104                                           SUPPORTED_Autoneg |
5105                                           SUPPORTED_Pause |
5106                                           SUPPORTED_Asym_Pause);
5107                         break;
5108
5109                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
5110                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
5111                                   bp->link_params.ext_phy_config);
5112                         break;
5113
5114                 default:
5115                         BNX2X_ERR("NVRAM config error. "
5116                                   "BAD XGXS ext_phy_config 0x%x\n",
5117                                   bp->link_params.ext_phy_config);
5118                         return;
5119                 }
5120
5121                 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
5122                                       port*0x18);
5123                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
5124
5125                 break;
5126
5127         default:
5128                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
5129                           bp->link_config);
5130                 return;
5131         }
5132         bp->link_params.phy_addr = bp->phy_addr;
5133
5134         /* mask what we support according to speed_cap_mask */
5135         if (!(bp->link_params.speed_cap_mask &
5136                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
5137                 bp->supported &= ~SUPPORTED_10baseT_Half;
5138
5139         if (!(bp->link_params.speed_cap_mask &
5140                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
5141                 bp->supported &= ~SUPPORTED_10baseT_Full;
5142
5143         if (!(bp->link_params.speed_cap_mask &
5144                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
5145                 bp->supported &= ~SUPPORTED_100baseT_Half;
5146
5147         if (!(bp->link_params.speed_cap_mask &
5148                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
5149                 bp->supported &= ~SUPPORTED_100baseT_Full;
5150
5151         if (!(bp->link_params.speed_cap_mask &
5152                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
5153                 bp->supported &= ~(SUPPORTED_1000baseT_Half |
5154                                    SUPPORTED_1000baseT_Full);
5155
5156         if (!(bp->link_params.speed_cap_mask &
5157                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
5158                 bp->supported &= ~SUPPORTED_2500baseX_Full;
5159
5160         if (!(bp->link_params.speed_cap_mask &
5161                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
5162                 bp->supported &= ~SUPPORTED_10000baseT_Full;
5163
5164         BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
5165 }
5166
5167 static void bnx2x_link_settings_requested(struct bnx2x *bp)
5168 {
5169         bp->link_params.req_duplex = DUPLEX_FULL;
5170
5171         switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
5172         case PORT_FEATURE_LINK_SPEED_AUTO:
5173                 if (bp->supported & SUPPORTED_Autoneg) {
5174                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5175                         bp->advertising = bp->supported;
5176                 } else {
5177                         u32 ext_phy_type =
5178                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5179
5180                         if ((ext_phy_type ==
5181                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5182                             (ext_phy_type ==
5183                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
5184                                 /* force 10G, no AN */
5185                                 bp->link_params.req_line_speed = SPEED_10000;
5186                                 bp->advertising =
5187                                                 (ADVERTISED_10000baseT_Full |
5188                                                  ADVERTISED_FIBRE);
5189                                 break;
5190                         }
5191                         BNX2X_ERR("NVRAM config error. "
5192                                   "Invalid link_config 0x%x"
5193                                   "  Autoneg not supported\n",
5194                                   bp->link_config);
5195                         return;
5196                 }
5197                 break;
5198
5199         case PORT_FEATURE_LINK_SPEED_10M_FULL:
5200                 if (bp->supported & SUPPORTED_10baseT_Full) {
5201                         bp->link_params.req_line_speed = SPEED_10;
5202                         bp->advertising = (ADVERTISED_10baseT_Full |
5203                                            ADVERTISED_TP);
5204                 } else {
5205                         BNX2X_ERR("NVRAM config error. "
5206                                   "Invalid link_config 0x%x"
5207                                   "  speed_cap_mask 0x%x\n",
5208                                   bp->link_config,
5209                                   bp->link_params.speed_cap_mask);
5210                         return;
5211                 }
5212                 break;
5213
5214         case PORT_FEATURE_LINK_SPEED_10M_HALF:
5215                 if (bp->supported & SUPPORTED_10baseT_Half) {
5216                         bp->link_params.req_line_speed = SPEED_10;
5217                         bp->link_params.req_duplex = DUPLEX_HALF;
5218                         bp->advertising = (ADVERTISED_10baseT_Half |
5219                                            ADVERTISED_TP);
5220                 } else {
5221                         BNX2X_ERR("NVRAM config error. "
5222                                   "Invalid link_config 0x%x"
5223                                   "  speed_cap_mask 0x%x\n",
5224                                   bp->link_config,
5225                                   bp->link_params.speed_cap_mask);
5226                         return;
5227                 }
5228                 break;
5229
5230         case PORT_FEATURE_LINK_SPEED_100M_FULL:
5231                 if (bp->supported & SUPPORTED_100baseT_Full) {
5232                         bp->link_params.req_line_speed = SPEED_100;
5233                         bp->advertising = (ADVERTISED_100baseT_Full |
5234                                            ADVERTISED_TP);
5235                 } else {
5236                         BNX2X_ERR("NVRAM config error. "
5237                                   "Invalid link_config 0x%x"
5238                                   "  speed_cap_mask 0x%x\n",
5239                                   bp->link_config,
5240                                   bp->link_params.speed_cap_mask);
5241                         return;
5242                 }
5243                 break;
5244
5245         case PORT_FEATURE_LINK_SPEED_100M_HALF:
5246                 if (bp->supported & SUPPORTED_100baseT_Half) {
5247                         bp->link_params.req_line_speed = SPEED_100;
5248                         bp->link_params.req_duplex = DUPLEX_HALF;
5249                         bp->advertising = (ADVERTISED_100baseT_Half |
5250                                            ADVERTISED_TP);
5251                 } else {
5252                         BNX2X_ERR("NVRAM config error. "
5253                                   "Invalid link_config 0x%x"
5254                                   "  speed_cap_mask 0x%x\n",
5255                                   bp->link_config,
5256                                   bp->link_params.speed_cap_mask);
5257                         return;
5258                 }
5259                 break;
5260
5261         case PORT_FEATURE_LINK_SPEED_1G:
5262                 if (bp->supported & SUPPORTED_1000baseT_Full) {
5263                         bp->link_params.req_line_speed = SPEED_1000;
5264                         bp->advertising = (ADVERTISED_1000baseT_Full |
5265                                            ADVERTISED_TP);
5266                 } else {
5267                         BNX2X_ERR("NVRAM config error. "
5268                                   "Invalid link_config 0x%x"
5269                                   "  speed_cap_mask 0x%x\n",
5270                                   bp->link_config,
5271                                   bp->link_params.speed_cap_mask);
5272                         return;
5273                 }
5274                 break;
5275
5276         case PORT_FEATURE_LINK_SPEED_2_5G:
5277                 if (bp->supported & SUPPORTED_2500baseX_Full) {
5278                         bp->link_params.req_line_speed = SPEED_2500;
5279                         bp->advertising = (ADVERTISED_2500baseX_Full |
5280                                            ADVERTISED_TP);
5281                 } else {
5282                         BNX2X_ERR("NVRAM config error. "
5283                                   "Invalid link_config 0x%x"
5284                                   "  speed_cap_mask 0x%x\n",
5285                                   bp->link_config,
5286                                   bp->link_params.speed_cap_mask);
5287                         return;
5288                 }
5289                 break;
5290
5291         case PORT_FEATURE_LINK_SPEED_10G_CX4:
5292         case PORT_FEATURE_LINK_SPEED_10G_KX4:
5293         case PORT_FEATURE_LINK_SPEED_10G_KR:
5294                 if (bp->supported & SUPPORTED_10000baseT_Full) {
5295                         bp->link_params.req_line_speed = SPEED_10000;
5296                         bp->advertising = (ADVERTISED_10000baseT_Full |
5297                                            ADVERTISED_FIBRE);
5298                 } else {
5299                         BNX2X_ERR("NVRAM config error. "
5300                                   "Invalid link_config 0x%x"
5301                                   "  speed_cap_mask 0x%x\n",
5302                                   bp->link_config,
5303                                   bp->link_params.speed_cap_mask);
5304                         return;
5305                 }
5306                 break;
5307
5308         default:
5309                 BNX2X_ERR("NVRAM config error. "
5310                           "BAD link speed link_config 0x%x\n",
5311                           bp->link_config);
5312                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5313                 bp->advertising = bp->supported;
5314                 break;
5315         }
5316
5317         bp->link_params.req_flow_ctrl = (bp->link_config &
5318                              PORT_FEATURE_FLOW_CONTROL_MASK);
5319         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
5320             !(bp->supported & SUPPORTED_Autoneg))
5321                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
5322
5323         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
5324                        "  advertising 0x%x\n",
5325                        bp->link_params.req_line_speed,
5326                        bp->link_params.req_duplex,
5327                        bp->link_params.req_flow_ctrl, bp->advertising);
5328 }
5329
5330 static void bnx2x_get_hwinfo(struct bnx2x *bp)
5331 {
5332         u32 val, val2, val3, val4, id;
5333         int port = bp->port;
5334
5335         bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5336         BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
5337
5338         /* Get the chip revision id and number. */
5339         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5340         val = REG_RD(bp, MISC_REG_CHIP_NUM);
5341         id = ((val & 0xffff) << 16);
5342         val = REG_RD(bp, MISC_REG_CHIP_REV);
5343         id |= ((val & 0xf) << 12);
5344         val = REG_RD(bp, MISC_REG_CHIP_METAL);
5345         id |= ((val & 0xff) << 4);
5346         val = REG_RD(bp, MISC_REG_BOND_ID);
5347         id |= (val & 0xf);
5348         bp->chip_id = id;
5349         BNX2X_DEV_INFO("chip ID is %x\n", id);
5350
5351         bp->link_params.bp = bp;
5352
5353         if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
5354                 BNX2X_DEV_INFO("MCP not active\n");
5355                 nomcp = 1;
5356                 goto set_mac;
5357         }
5358
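        /* Both the device-info and the mailbox validity bits must be
         * set by the MCP; otherwise the shared memory contents are
         * suspect.
         */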
5359         val = SHMEM_RD(bp, validity_map[port]);
5360         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5361                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5362                 BNX2X_ERR("BAD MCP validity signature\n");
5363
5364         bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
5365                       DRV_MSG_SEQ_NUMBER_MASK);
5366
5367         bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
5368         bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
5369         bp->link_params.serdes_config =
5370                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
5371         bp->link_params.lane_config =
5372                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
5373         bp->link_params.ext_phy_config =
5374                 SHMEM_RD(bp,
5375                          dev_info.port_hw_config[port].external_phy_config);
5376         bp->link_params.speed_cap_mask =
5377                 SHMEM_RD(bp,
5378                          dev_info.port_hw_config[port].speed_capability_mask);
5379
5380         bp->link_config =
5381                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
5382
5383         BNX2X_DEV_INFO("serdes_config (%08x)  lane_config (%08x)\n"
5384              KERN_INFO "  ext_phy_config (%08x)  speed_cap_mask (%08x)"
5385                        "  link_config (%08x)\n",
5386                        bp->link_params.serdes_config,
5387                        bp->link_params.lane_config,
5388                        bp->link_params.ext_phy_config,
5389                        bp->link_params.speed_cap_mask,
5390                        bp->link_config);
5391
5392         bp->link_params.switch_cfg = (bp->link_config &
5393                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
5394         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
5395
5396         bnx2x_link_settings_requested(bp);
5397
5398         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
5399         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
5400         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
5401         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
5402         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
5403         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
5404         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
5405         bp->dev->dev_addr[5] = (u8)(val & 0xff);
5406         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
5407         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
5408
5409
5410
5411         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
5412         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
5413         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
5414         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
5415
5416         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
5417                val, val2, val3, val4);
5418
5419         /* bc ver */
5420         if (!nomcp) {
5421                 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
5422                 BNX2X_DEV_INFO("bc_ver %X\n", val);
5423                 if (val < BNX2X_BC_VER) {
5424                         /* for now only warn
5425                          * later we might need to enforce this */
5426                         BNX2X_ERR("This driver needs bc_ver %X but found %X,"
5427                                   " please upgrade BC\n", BNX2X_BC_VER, val);
5428                 }
5429         } else {
5430                 bp->bc_ver = 0;
5431         }
5432
5433         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5434         bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
5435         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5436                        bp->flash_size, bp->flash_size);
5437
5438         return;
5439
5440 set_mac: /* only supposed to happen on emulation/FPGA */
5441         BNX2X_ERR("warning: random MAC workaround active\n");
5442         random_ether_addr(bp->dev->dev_addr);
5443         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
5444
5445 }
5446
5447 /*
5448  * ethtool service functions
5449  */
5450
5451 /* All ethtool functions called with rtnl_lock */
5452
5453 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5454 {
5455         struct bnx2x *bp = netdev_priv(dev);
5456
5457         cmd->supported = bp->supported;
5458         cmd->advertising = bp->advertising;
5459
5460         if (netif_carrier_ok(dev)) {
5461                 cmd->speed = bp->link_vars.line_speed;
5462                 cmd->duplex = bp->link_vars.duplex;
5463         } else {
5464                 cmd->speed = bp->link_params.req_line_speed;
5465                 cmd->duplex = bp->link_params.req_duplex;
5466         }
5467
5468         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
5469                 u32 ext_phy_type =
5470                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5471
5472                 switch (ext_phy_type) {
5473                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5474                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5475                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5476                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5477                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5478                         cmd->port = PORT_FIBRE;
5479                         break;
5480
5481                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5482                         cmd->port = PORT_TP;
5483                         break;
5484
5485                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
5486                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
5487                                   bp->link_params.ext_phy_config);
5488                         break;
5489
5490                 default:
5491                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
5492                            bp->link_params.ext_phy_config);
5493                         break;
5494                 }
5495         } else
5496                 cmd->port = PORT_TP;
5497
5498         cmd->phy_address = bp->phy_addr;
5499         cmd->transceiver = XCVR_INTERNAL;
5500
5501         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
5502                 cmd->autoneg = AUTONEG_ENABLE;
5503         else
5504                 cmd->autoneg = AUTONEG_DISABLE;
5505
5506         cmd->maxtxpkt = 0;
5507         cmd->maxrxpkt = 0;
5508
5509         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
5510            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
5511            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
5512            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
5513            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
5514            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
5515            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
5516
5517         return 0;
5518 }
5519
5520 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5521 {
5522         struct bnx2x *bp = netdev_priv(dev);
5523         u32 advertising;
5524
5525         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
5526            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
5527            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
5528            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
5529            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
5530            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
5531            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
5532
5533         if (cmd->autoneg == AUTONEG_ENABLE) {
5534                 if (!(bp->supported & SUPPORTED_Autoneg)) {
5535                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
5536                         return -EINVAL;
5537                 }
5538
5539                 /* advertise the requested speed and duplex if supported */
5540                 cmd->advertising &= bp->supported;
5541
5542                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5543                 bp->link_params.req_duplex = DUPLEX_FULL;
5544                 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
5545
5546         } else { /* forced speed */
5547                 /* advertise the requested speed and duplex if supported */
5548                 switch (cmd->speed) {
5549                 case SPEED_10:
5550                         if (cmd->duplex == DUPLEX_FULL) {
5551                                 if (!(bp->supported &
5552                                       SUPPORTED_10baseT_Full)) {
5553                                         DP(NETIF_MSG_LINK,
5554                                            "10M full not supported\n");
5555                                         return -EINVAL;
5556                                 }
5557
5558                                 advertising = (ADVERTISED_10baseT_Full |
5559                                                ADVERTISED_TP);
5560                         } else {
5561                                 if (!(bp->supported &
5562                                       SUPPORTED_10baseT_Half)) {
5563                                         DP(NETIF_MSG_LINK,
5564                                            "10M half not supported\n");
5565                                         return -EINVAL;
5566                                 }
5567
5568                                 advertising = (ADVERTISED_10baseT_Half |
5569                                                ADVERTISED_TP);
5570                         }
5571                         break;
5572
5573                 case SPEED_100:
5574                         if (cmd->duplex == DUPLEX_FULL) {
5575                                 if (!(bp->supported &
5576                                                 SUPPORTED_100baseT_Full)) {
5577                                         DP(NETIF_MSG_LINK,
5578                                            "100M full not supported\n");
5579                                         return -EINVAL;
5580                                 }
5581
5582                                 advertising = (ADVERTISED_100baseT_Full |
5583                                                ADVERTISED_TP);
5584                         } else {
5585                                 if (!(bp->supported &
5586                                                 SUPPORTED_100baseT_Half)) {
5587                                         DP(NETIF_MSG_LINK,
5588                                            "100M half not supported\n");
5589                                         return -EINVAL;
5590                                 }
5591
5592                                 advertising = (ADVERTISED_100baseT_Half |
5593                                                ADVERTISED_TP);
5594                         }
5595                         break;
5596
5597                 case SPEED_1000:
5598                         if (cmd->duplex != DUPLEX_FULL) {
5599                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
5600                                 return -EINVAL;
5601                         }
5602
5603                         if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
5604                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
5605                                 return -EINVAL;
5606                         }
5607
5608                         advertising = (ADVERTISED_1000baseT_Full |
5609                                        ADVERTISED_TP);
5610                         break;
5611
5612                 case SPEED_2500:
5613                         if (cmd->duplex != DUPLEX_FULL) {
5614                                 DP(NETIF_MSG_LINK,
5615                                    "2.5G half not supported\n");
5616                                 return -EINVAL;
5617                         }
5618
5619                         if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
5620                                 DP(NETIF_MSG_LINK,
5621                                    "2.5G full not supported\n");
5622                                 return -EINVAL;
5623                         }
5624
5625                         advertising = (ADVERTISED_2500baseX_Full |
5626                                        ADVERTISED_TP);
5627                         break;
5628
5629                 case SPEED_10000:
5630                         if (cmd->duplex != DUPLEX_FULL) {
5631                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
5632                                 return -EINVAL;
5633                         }
5634
5635                         if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
5636                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
5637                                 return -EINVAL;
5638                         }
5639
5640                         advertising = (ADVERTISED_10000baseT_Full |
5641                                        ADVERTISED_FIBRE);
5642                         break;
5643
5644                 default:
5645                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
5646                         return -EINVAL;
5647                 }
5648
5649                 bp->link_params.req_line_speed = cmd->speed;
5650                 bp->link_params.req_duplex = cmd->duplex;
5651                 bp->advertising = advertising;
5652         }
5653
5654         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
5655            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
5656            bp->link_params.req_line_speed, bp->link_params.req_duplex,
5657            bp->advertising);
5658
5659         bnx2x_stop_stats(bp);
5660         bnx2x_link_set(bp);
5661
5662         return 0;
5663 }
5664
5665 #define PHY_FW_VER_LEN                  10
5666
5667 static void bnx2x_get_drvinfo(struct net_device *dev,
5668                               struct ethtool_drvinfo *info)
5669 {
5670         struct bnx2x *bp = netdev_priv(dev);
5671         char phy_fw_ver[PHY_FW_VER_LEN];
5672
5673         strcpy(info->driver, DRV_MODULE_NAME);
5674         strcpy(info->version, DRV_MODULE_VERSION);
5675
5676         phy_fw_ver[0] = '\0';
5677         bnx2x_phy_hw_lock(bp);
5678         bnx2x_get_ext_phy_fw_version(&bp->link_params,
5679                                      (bp->state != BNX2X_STATE_CLOSED),
5680                                      phy_fw_ver, PHY_FW_VER_LEN);
5681         bnx2x_phy_hw_unlock(bp);
5682
5683         snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
5684                  BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
5685                  BCM_5710_FW_REVISION_VERSION,
5686                  BCM_5710_FW_COMPILE_FLAGS, bp->bc_ver,
5687                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
5688         strcpy(info->bus_info, pci_name(bp->pdev));
5689         info->n_stats = BNX2X_NUM_STATS;
5690         info->testinfo_len = BNX2X_NUM_TESTS;
5691         info->eedump_len = bp->flash_size;
5692         info->regdump_len = 0;
5693 }
5694
5695 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5696 {
5697         struct bnx2x *bp = netdev_priv(dev);
5698
5699         if (bp->flags & NO_WOL_FLAG) {
5700                 wol->supported = 0;
5701                 wol->wolopts = 0;
5702         } else {
5703                 wol->supported = WAKE_MAGIC;
5704                 if (bp->wol)
5705                         wol->wolopts = WAKE_MAGIC;
5706                 else
5707                         wol->wolopts = 0;
5708         }
5709         memset(&wol->sopass, 0, sizeof(wol->sopass));
5710 }
5711
5712 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5713 {
5714         struct bnx2x *bp = netdev_priv(dev);
5715
5716         if (wol->wolopts & ~WAKE_MAGIC)
5717                 return -EINVAL;
5718
5719         if (wol->wolopts & WAKE_MAGIC) {
5720                 if (bp->flags & NO_WOL_FLAG)
5721                         return -EINVAL;
5722
5723                 bp->wol = 1;
5724         } else {
5725                 bp->wol = 0;
5726         }
5727         return 0;
5728 }
5729
5730 static u32 bnx2x_get_msglevel(struct net_device *dev)
5731 {
5732         struct bnx2x *bp = netdev_priv(dev);
5733
5734         return bp->msglevel;
5735 }
5736
5737 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
5738 {
5739         struct bnx2x *bp = netdev_priv(dev);
5740
5741         if (capable(CAP_NET_ADMIN))
5742                 bp->msglevel = level;
5743 }
5744
5745 static int bnx2x_nway_reset(struct net_device *dev)
5746 {
5747         struct bnx2x *bp = netdev_priv(dev);
5748
5749         if (bp->state != BNX2X_STATE_OPEN) {
5750                 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
5751                 return -EAGAIN;
5752         }
5753
5754         bnx2x_stop_stats(bp);
5755         bnx2x_link_set(bp);
5756
5757         return 0;
5758 }
5759
5760 static int bnx2x_get_eeprom_len(struct net_device *dev)
5761 {
5762         struct bnx2x *bp = netdev_priv(dev);
5763
5764         return bp->flash_size;
5765 }
5766
5767 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
5768 {
5769         int port = bp->port;
5770         int count, i;
5771         u32 val = 0;
5772
5773         /* adjust timeout for emulation/FPGA */
5774         count = NVRAM_TIMEOUT_COUNT;
5775         if (CHIP_REV_IS_SLOW(bp))
5776                 count *= 100;
5777
5778         /* request access to nvram interface */
5779         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
5780                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
5781
5782         for (i = 0; i < count*10; i++) {
5783                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
5784                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
5785                         break;
5786
5787                 udelay(5);
5788         }
5789
5790         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
5791                 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
5792                 return -EBUSY;
5793         }
5794
5795         return 0;
5796 }
5797
5798 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
5799 {
5800         int port = bp->port;
5801         int count, i;
5802         u32 val = 0;
5803
5804         /* adjust timeout for emulation/FPGA */
5805         count = NVRAM_TIMEOUT_COUNT;
5806         if (CHIP_REV_IS_SLOW(bp))
5807                 count *= 100;
5808
5809         /* relinquish nvram interface */
5810         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
5811                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
5812
5813         for (i = 0; i < count*10; i++) {
5814                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
5815                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
5816                         break;
5817
5818                 udelay(5);
5819         }
5820
5821         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
5822                 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
5823                 return -EBUSY;
5824         }
5825
5826         return 0;
5827 }
5828
5829 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
5830 {
5831         u32 val;
5832
5833         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5834
5835         /* enable both bits, even on read */
5836         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5837                (val | MCPR_NVM_ACCESS_ENABLE_EN |
5838                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
5839 }
5840
5841 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
5842 {
5843         u32 val;
5844
5845         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5846
5847         /* disable both bits, even after read */
5848         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5849                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
5850                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
5851 }
5852
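/* Single-dword NVRAM read handshake: clear the DONE bit, program the
 * address, issue DOIT (plus any FIRST/LAST flags from the caller) and
 * poll for DONE, then pick the data up from MCPR_NVM_READ.
 */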
5853 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
5854                                   u32 cmd_flags)
5855 {
5856         int count, i, rc;
5857         u32 val;
5858
5859         /* build the command word */
5860         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
5861
5862         /* need to clear DONE bit separately */
5863         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5864
5865         /* address of the NVRAM to read from */
5866         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
5867                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5868
5869         /* issue a read command */
5870         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5871
5872         /* adjust timeout for emulation/FPGA */
5873         count = NVRAM_TIMEOUT_COUNT;
5874         if (CHIP_REV_IS_SLOW(bp))
5875                 count *= 100;
5876
5877         /* wait for completion */
5878         *ret_val = 0;
5879         rc = -EBUSY;
5880         for (i = 0; i < count; i++) {
5881                 udelay(5);
5882                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
5883
5884                 if (val & MCPR_NVM_COMMAND_DONE) {
5885                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
5886                         DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
5887                         /* we read nvram data in cpu order
5888                          * but ethtool sees it as an array of bytes
5889                          * converting to big-endian will do the work */
5890                         val = cpu_to_be32(val);
5891                         *ret_val = val;
5892                         rc = 0;
5893                         break;
5894                 }
5895         }
5896
5897         return rc;
5898 }
5899
5900 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
5901                             int buf_size)
5902 {
5903         int rc;
5904         u32 cmd_flags;
5905         u32 val;
5906
5907         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
5908                 DP(NETIF_MSG_NVM,
5909                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
5910                    offset, buf_size);
5911                 return -EINVAL;
5912         }
5913
5914         if (offset + buf_size > bp->flash_size) {
5915                 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
5916                                   " buf_size (0x%x) > flash_size (0x%x)\n",
5917                    offset, buf_size, bp->flash_size);
5918                 return -EINVAL;
5919         }
5920
5921         /* request access to nvram interface */
5922         rc = bnx2x_acquire_nvram_lock(bp);
5923         if (rc)
5924                 return rc;
5925
5926         /* enable access to nvram interface */
5927         bnx2x_enable_nvram_access(bp);
5928
5929         /* read the first word(s) */
5930         cmd_flags = MCPR_NVM_COMMAND_FIRST;
5931         while ((buf_size > sizeof(u32)) && (rc == 0)) {
5932                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
5933                 memcpy(ret_buf, &val, 4);
5934
5935                 /* advance to the next dword */
5936                 offset += sizeof(u32);
5937                 ret_buf += sizeof(u32);
5938                 buf_size -= sizeof(u32);
5939                 cmd_flags = 0;
5940         }
5941
5942         if (rc == 0) {
5943                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
5944                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
5945                 memcpy(ret_buf, &val, 4);
5946         }
5947
5948         /* disable access to nvram interface */
5949         bnx2x_disable_nvram_access(bp);
5950         bnx2x_release_nvram_lock(bp);
5951
5952         return rc;
5953 }
5954
5955 static int bnx2x_get_eeprom(struct net_device *dev,
5956                             struct ethtool_eeprom *eeprom, u8 *eebuf)
5957 {
5958         struct bnx2x *bp = netdev_priv(dev);
5959         int rc;
5960
5961         DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
5962            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
5963            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
5964            eeprom->len, eeprom->len);
5965
5966         /* parameters already validated in ethtool_get_eeprom */
5967
5968         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5969
5970         return rc;
5971 }
5972
5973 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
5974                                    u32 cmd_flags)
5975 {
5976         int count, i, rc;
5977
5978         /* build the command word */
5979         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
5980
5981         /* need to clear DONE bit separately */
5982         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5983
5984         /* write the data */
5985         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
5986
5987         /* address of the NVRAM to write to */
5988         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
5989                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5990
5991         /* issue the write command */
5992         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5993
5994         /* adjust timeout for emulation/FPGA */
5995         count = NVRAM_TIMEOUT_COUNT;
5996         if (CHIP_REV_IS_SLOW(bp))
5997                 count *= 100;
5998
5999         /* wait for completion */
6000         rc = -EBUSY;
6001         for (i = 0; i < count; i++) {
6002                 udelay(5);
6003                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
6004                 if (val & MCPR_NVM_COMMAND_DONE) {
6005                         rc = 0;
6006                         break;
6007                 }
6008         }
6009
6010         return rc;
6011 }
6012
6013 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
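/* Converts the byte lane within a dword to a bit shift, e.g. an offset
 * ending in 0x3 selects bits 31:24 (a shift of 24).
 */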
6014
6015 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
6016                               int buf_size)
6017 {
6018         int rc;
6019         u32 cmd_flags;
6020         u32 align_offset;
6021         u32 val;
6022
6023         if (offset + buf_size > bp->flash_size) {
6024                 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
6025                                   " buf_size (0x%x) > flash_size (0x%x)\n",
6026                    offset, buf_size, bp->flash_size);
6027                 return -EINVAL;
6028         }
6029
6030         /* request access to nvram interface */
6031         rc = bnx2x_acquire_nvram_lock(bp);
6032         if (rc)
6033                 return rc;
6034
6035         /* enable access to nvram interface */
6036         bnx2x_enable_nvram_access(bp);
6037
6038         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
6039         align_offset = (offset & ~0x03);
6040         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
6041
6042         if (rc == 0) {
6043                 val &= ~(0xff << BYTE_OFFSET(offset));
6044                 val |= (*data_buf << BYTE_OFFSET(offset));
6045
6046                 /* nvram data is returned as an array of bytes
6047                  * convert it back to cpu order */
6048                 val = be32_to_cpu(val);
6049
6050                 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
6051
6052                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
6053                                              cmd_flags);
6054         }
6055
6056         /* disable access to nvram interface */
6057         bnx2x_disable_nvram_access(bp);
6058         bnx2x_release_nvram_lock(bp);
6059
6060         return rc;
6061 }
6062
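/* Multi-dword NVRAM write.  Single-byte requests (as issued by ethtool)
 * are handled by the read-modify-write helper above; dword-aligned
 * buffers are streamed with the FIRST/LAST command flags re-asserted at
 * NVRAM page boundaries.
 */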
6063 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
6064                              int buf_size)
6065 {
6066         int rc;
6067         u32 cmd_flags;
6068         u32 val;
6069         u32 written_so_far;
6070
6071         if (buf_size == 1) {    /* ethtool */
6072                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
6073         }
6074
6075         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
6076                 DP(NETIF_MSG_NVM,
6077                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
6078                    offset, buf_size);
6079                 return -EINVAL;
6080         }
6081
6082         if (offset + buf_size > bp->flash_size) {
6083                 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
6084                                   " buf_size (0x%x) > flash_size (0x%x)\n",
6085                    offset, buf_size, bp->flash_size);
6086                 return -EINVAL;
6087         }
6088
6089         /* request access to nvram interface */
6090         rc = bnx2x_acquire_nvram_lock(bp);
6091         if (rc)
6092                 return rc;
6093
6094         /* enable access to nvram interface */
6095         bnx2x_enable_nvram_access(bp);
6096
6097         written_so_far = 0;
6098         cmd_flags = MCPR_NVM_COMMAND_FIRST;
6099         while ((written_so_far < buf_size) && (rc == 0)) {
6100                 if (written_so_far == (buf_size - sizeof(u32)))
6101                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
6102                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
6103                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
6104                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
6105                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
6106
6107                 memcpy(&val, data_buf, 4);
6108                 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
6109
6110                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
6111
6112                 /* advance to the next dword */
6113                 offset += sizeof(u32);
6114                 data_buf += sizeof(u32);
6115                 written_so_far += sizeof(u32);
6116                 cmd_flags = 0;
6117         }
6118
6119         /* disable access to nvram interface */
6120         bnx2x_disable_nvram_access(bp);
6121         bnx2x_release_nvram_lock(bp);
6122
6123         return rc;
6124 }
6125
6126 static int bnx2x_set_eeprom(struct net_device *dev,
6127                             struct ethtool_eeprom *eeprom, u8 *eebuf)
6128 {
6129         struct bnx2x *bp = netdev_priv(dev);
6130         int rc;
6131
6132         DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
6133            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
6134            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
6135            eeprom->len, eeprom->len);
6136
6137         /* parameters already validated in ethtool_set_eeprom */
6138
6139         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
6140         if (eeprom->magic == 0x00504859) {
6141
6142                 bnx2x_phy_hw_lock(bp);
6143                 rc = bnx2x_flash_download(bp, bp->port,
6144                                      bp->link_params.ext_phy_config,
6145                                      (bp->state != BNX2X_STATE_CLOSED),
6146                                      eebuf, eeprom->len);
6147                 rc |= bnx2x_link_reset(&bp->link_params,
6148                                        &bp->link_vars);
6149                 rc |= bnx2x_phy_init(&bp->link_params,
6150                                      &bp->link_vars);
6151                 bnx2x_phy_hw_unlock(bp);
6152
6153         } else
6154                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6155
6156         return rc;
6157 }
6158
6159 static int bnx2x_get_coalesce(struct net_device *dev,
6160                               struct ethtool_coalesce *coal)
6161 {
6162         struct bnx2x *bp = netdev_priv(dev);
6163
6164         memset(coal, 0, sizeof(struct ethtool_coalesce));
6165
6166         coal->rx_coalesce_usecs = bp->rx_ticks;
6167         coal->tx_coalesce_usecs = bp->tx_ticks;
6168         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6169
6170         return 0;
6171 }
6172
6173 static int bnx2x_set_coalesce(struct net_device *dev,
6174                               struct ethtool_coalesce *coal)
6175 {
6176         struct bnx2x *bp = netdev_priv(dev);
6177
6178         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6179         if (bp->rx_ticks > 3000)
6180                 bp->rx_ticks = 3000;
6181
6182         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6183         if (bp->tx_ticks > 0x3000)
6184                 bp->tx_ticks = 0x3000;
6185
6186         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6187         if (bp->stats_ticks > 0xffff00)
6188                 bp->stats_ticks = 0xffff00;
6189         bp->stats_ticks &= 0xffff00;
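        /* stats_ticks is kept a multiple of 0x100, apparently the
         * granularity the chip expects */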
6190
6191         if (netif_running(bp->dev))
6192                 bnx2x_update_coalesce(bp);
6193
6194         return 0;
6195 }
6196
6197 static void bnx2x_get_ringparam(struct net_device *dev,
6198                                 struct ethtool_ringparam *ering)
6199 {
6200         struct bnx2x *bp = netdev_priv(dev);
6201
6202         ering->rx_max_pending = MAX_RX_AVAIL;
6203         ering->rx_mini_max_pending = 0;
6204         ering->rx_jumbo_max_pending = 0;
6205
6206         ering->rx_pending = bp->rx_ring_size;
6207         ering->rx_mini_pending = 0;
6208         ering->rx_jumbo_pending = 0;
6209
6210         ering->tx_max_pending = MAX_TX_AVAIL;
6211         ering->tx_pending = bp->tx_ring_size;
6212 }
6213
6214 static int bnx2x_set_ringparam(struct net_device *dev,
6215                                struct ethtool_ringparam *ering)
6216 {
6217         struct bnx2x *bp = netdev_priv(dev);
6218
6219         if ((ering->rx_pending > MAX_RX_AVAIL) ||
6220             (ering->tx_pending > MAX_TX_AVAIL) ||
6221             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
6222                 return -EINVAL;
6223
6224         bp->rx_ring_size = ering->rx_pending;
6225         bp->tx_ring_size = ering->tx_pending;
6226
6227         if (netif_running(bp->dev)) {
6228                 bnx2x_nic_unload(bp, 0);
6229                 bnx2x_nic_load(bp, 0);
6230         }
6231
6232         return 0;
6233 }
6234
6235 static void bnx2x_get_pauseparam(struct net_device *dev,
6236                                  struct ethtool_pauseparam *epause)
6237 {
6238         struct bnx2x *bp = netdev_priv(dev);
6239
6240         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
6241                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
6242
6243         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
6244                             FLOW_CTRL_RX);
6245         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
6246                             FLOW_CTRL_TX);
6247
6248         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
6249            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
6250            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
6251 }
6252
6253 static int bnx2x_set_pauseparam(struct net_device *dev,
6254                                 struct ethtool_pauseparam *epause)
6255 {
6256         struct bnx2x *bp = netdev_priv(dev);
6257
6258         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
6259            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
6260            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
6261
6262         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
6263
6264         if (epause->rx_pause)
6265                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
6266
6267         if (epause->tx_pause)
6268                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
6269
6270         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
6271                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
6272
6273         if (epause->autoneg) {
6274                 if (!(bp->supported & SUPPORTED_Autoneg)) {
6275                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
6276                         return -EINVAL;
6277                 }
6278
6279                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
6280                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
6281         }
6282
6283         DP(NETIF_MSG_LINK,
6284            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
6285         bnx2x_stop_stats(bp);
6286         bnx2x_link_set(bp);
6287
6288         return 0;
6289 }
6290
6291 static u32 bnx2x_get_rx_csum(struct net_device *dev)
6292 {
6293         struct bnx2x *bp = netdev_priv(dev);
6294
6295         return bp->rx_csum;
6296 }
6297
6298 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
6299 {
6300         struct bnx2x *bp = netdev_priv(dev);
6301
6302         bp->rx_csum = data;
6303         return 0;
6304 }
6305
6306 static int bnx2x_set_tso(struct net_device *dev, u32 data)
6307 {
6308         if (data)
6309                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
6310         else
6311                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
6312         return 0;
6313 }
6314
6315 static struct {
6316         char string[ETH_GSTRING_LEN];
6317 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
6318         { "MC Errors  (online)" }
6319 };
6320
6321 static int bnx2x_self_test_count(struct net_device *dev)
6322 {
6323         return BNX2X_NUM_TESTS;
6324 }
6325
6326 static void bnx2x_self_test(struct net_device *dev,
6327                             struct ethtool_test *etest, u64 *buf)
6328 {
6329         struct bnx2x *bp = netdev_priv(dev);
6330         int stats_state;
6331
6332         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
6333
6334         if (bp->state != BNX2X_STATE_OPEN) {
6335                 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
6336                 return;
6337         }
6338
6339         stats_state = bp->stats_state;
6340         bnx2x_stop_stats(bp);
6341
6342         if (bnx2x_mc_assert(bp) != 0) {
6343                 buf[0] = 1;
6344                 etest->flags |= ETH_TEST_FL_FAILED;
6345         }
6346
6347 #ifdef BNX2X_EXTRA_DEBUG
6348         bnx2x_panic_dump(bp);
6349 #endif
6350         bp->stats_state = stats_state;
6351 }
6352
6353 static struct {
6354         char string[ETH_GSTRING_LEN];
6355 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
6356         { "rx_bytes"},
6357         { "rx_error_bytes"},
6358         { "tx_bytes"},
6359         { "tx_error_bytes"},
6360         { "rx_ucast_packets"},
6361         { "rx_mcast_packets"},
6362         { "rx_bcast_packets"},
6363         { "tx_ucast_packets"},
6364         { "tx_mcast_packets"},
6365         { "tx_bcast_packets"},
6366         { "tx_mac_errors"},     /* 10 */
6367         { "tx_carrier_errors"},
6368         { "rx_crc_errors"},
6369         { "rx_align_errors"},
6370         { "tx_single_collisions"},
6371         { "tx_multi_collisions"},
6372         { "tx_deferred"},
6373         { "tx_excess_collisions"},
6374         { "tx_late_collisions"},
6375         { "tx_total_collisions"},
6376         { "rx_fragments"},      /* 20 */
6377         { "rx_jabbers"},
6378         { "rx_undersize_packets"},
6379         { "rx_oversize_packets"},
6380         { "rx_xon_frames"},
6381         { "rx_xoff_frames"},
6382         { "tx_xon_frames"},
6383         { "tx_xoff_frames"},
6384         { "rx_mac_ctrl_frames"},
6385         { "rx_filtered_packets"},
6386         { "rx_discards"},       /* 30 */
6387         { "brb_discard"},
6388         { "brb_truncate"},
6389         { "xxoverflow"}
6390 };
6391
6392 #define STATS_OFFSET32(offset_name) \
6393         (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
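/* STATS_OFFSET32 turns a field offset in struct bnx2x_eth_stats into an
 * index into the u32 view of the stats block used below.
 */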
6394
6395 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
6396         STATS_OFFSET32(total_bytes_received_hi),
6397         STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6398         STATS_OFFSET32(total_bytes_transmitted_hi),
6399         STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6400         STATS_OFFSET32(total_unicast_packets_received_hi),
6401         STATS_OFFSET32(total_multicast_packets_received_hi),
6402         STATS_OFFSET32(total_broadcast_packets_received_hi),
6403         STATS_OFFSET32(total_unicast_packets_transmitted_hi),
6404         STATS_OFFSET32(total_multicast_packets_transmitted_hi),
6405         STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
6406         STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
6407         STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6408         STATS_OFFSET32(crc_receive_errors),
6409         STATS_OFFSET32(alignment_errors),
6410         STATS_OFFSET32(single_collision_transmit_frames),
6411         STATS_OFFSET32(multiple_collision_transmit_frames),
6412         STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6413         STATS_OFFSET32(excessive_collision_frames),
6414         STATS_OFFSET32(late_collision_frames),
6415         STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
6416         STATS_OFFSET32(runt_packets_received),                  /* 20 */
6417         STATS_OFFSET32(jabber_packets_received),
6418         STATS_OFFSET32(error_runt_packets_received),
6419         STATS_OFFSET32(error_jabber_packets_received),
6420         STATS_OFFSET32(pause_xon_frames_received),
6421         STATS_OFFSET32(pause_xoff_frames_received),
6422         STATS_OFFSET32(pause_xon_frames_transmitted),
6423         STATS_OFFSET32(pause_xoff_frames_transmitted),
6424         STATS_OFFSET32(control_frames_received),
6425         STATS_OFFSET32(mac_filter_discard),
6426         STATS_OFFSET32(no_buff_discard),                        /* 30 */
6427         STATS_OFFSET32(brb_discard),
6428         STATS_OFFSET32(brb_truncate_discard),
6429         STATS_OFFSET32(xxoverflow_discard)
6430 };
6431
6432 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
6433         8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
6434         4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
6435         4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
6436         4, 4, 4, 4
6437 };
6438
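/* Entry width in bytes for each counter above: 8 for 64-bit hi/lo pairs,
 * 4 for plain 32-bit counters, 0 for counters that are skipped.
 */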
6439 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6440 {
6441         switch (stringset) {
6442         case ETH_SS_STATS:
6443                 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
6444                 break;
6445
6446         case ETH_SS_TEST:
6447                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
6448                 break;
6449         }
6450 }
6451
6452 static int bnx2x_get_stats_count(struct net_device *dev)
6453 {
6454         return BNX2X_NUM_STATS;
6455 }
6456
6457 static void bnx2x_get_ethtool_stats(struct net_device *dev,
6458                                     struct ethtool_stats *stats, u64 *buf)
6459 {
6460         struct bnx2x *bp = netdev_priv(dev);
6461         u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
6462         int i;
6463
6464         for (i = 0; i < BNX2X_NUM_STATS; i++) {
6465                 if (bnx2x_stats_len_arr[i] == 0) {
6466                         /* skip this counter */
6467                         buf[i] = 0;
6468                         continue;
6469                 }
6470                 if (!hw_stats) {
6471                         buf[i] = 0;
6472                         continue;
6473                 }
6474                 if (bnx2x_stats_len_arr[i] == 4) {
6475                         /* 4-byte counter */
6476                         buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
6477                         continue;
6478                 }
6479                 /* 8-byte counter */
6480                 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
6481                                  *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
6482         }
6483 }
6484
6485 static int bnx2x_phys_id(struct net_device *dev, u32 data)
6486 {
6487         struct bnx2x *bp = netdev_priv(dev);
6488         int i;
6489
6490         if (data == 0)
6491                 data = 2;
6492
6493         for (i = 0; i < (data * 2); i++) {
6494                 if ((i % 2) == 0)
6495                         bnx2x_set_led(bp, bp->port, LED_MODE_OPER, SPEED_1000,
6496                                       bp->link_params.hw_led_mode,
6497                                       bp->link_params.chip_id);
6498                 else
6499                         bnx2x_set_led(bp, bp->port, LED_MODE_OFF, 0,
6500                                       bp->link_params.hw_led_mode,
6501                                       bp->link_params.chip_id);
6502
6503                 msleep_interruptible(500);
6504                 if (signal_pending(current))
6505                         break;
6506         }
6507
6508         if (bp->link_vars.link_up)
6509                 bnx2x_set_led(bp, bp->port, LED_MODE_OPER,
6510                               bp->link_vars.line_speed,
6511                               bp->link_params.hw_led_mode,
6512                               bp->link_params.chip_id);
6513
6514         return 0;
6515 }
6516
6517 static struct ethtool_ops bnx2x_ethtool_ops = {
6518         .get_settings           = bnx2x_get_settings,
6519         .set_settings           = bnx2x_set_settings,
6520         .get_drvinfo            = bnx2x_get_drvinfo,
6521         .get_wol                = bnx2x_get_wol,
6522         .set_wol                = bnx2x_set_wol,
6523         .get_msglevel           = bnx2x_get_msglevel,
6524         .set_msglevel           = bnx2x_set_msglevel,
6525         .nway_reset             = bnx2x_nway_reset,
6526         .get_link               = ethtool_op_get_link,
6527         .get_eeprom_len         = bnx2x_get_eeprom_len,
6528         .get_eeprom             = bnx2x_get_eeprom,
6529         .set_eeprom             = bnx2x_set_eeprom,
6530         .get_coalesce           = bnx2x_get_coalesce,
6531         .set_coalesce           = bnx2x_set_coalesce,
6532         .get_ringparam          = bnx2x_get_ringparam,
6533         .set_ringparam          = bnx2x_set_ringparam,
6534         .get_pauseparam         = bnx2x_get_pauseparam,
6535         .set_pauseparam         = bnx2x_set_pauseparam,
6536         .get_rx_csum            = bnx2x_get_rx_csum,
6537         .set_rx_csum            = bnx2x_set_rx_csum,
6538         .get_tx_csum            = ethtool_op_get_tx_csum,
6539         .set_tx_csum            = ethtool_op_set_tx_csum,
6540         .get_sg                 = ethtool_op_get_sg,
6541         .set_sg                 = ethtool_op_set_sg,
6542         .get_tso                = ethtool_op_get_tso,
6543         .set_tso                = bnx2x_set_tso,
6544         .self_test_count        = bnx2x_self_test_count,
6545         .self_test              = bnx2x_self_test,
6546         .get_strings            = bnx2x_get_strings,
6547         .phys_id                = bnx2x_phys_id,
6548         .get_stats_count        = bnx2x_get_stats_count,
6549         .get_ethtool_stats      = bnx2x_get_ethtool_stats
6550 };
6551
6552 /* end of ethtool_ops */
6553
6554 /****************************************************************************
6555 * General service functions
6556 ****************************************************************************/
6557
6558 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
6559 {
6560         u16 pmcsr;
6561
6562         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
6563
6564         switch (state) {
6565         case PCI_D0:
6566                 pci_write_config_word(bp->pdev,
6567                                       bp->pm_cap + PCI_PM_CTRL,
6568                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
6569                                        PCI_PM_CTRL_PME_STATUS));
6570
6571                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
6572                 /* delay required during transition out of D3hot */
6573                         msleep(20);
6574                 break;
6575
6576         case PCI_D3hot:
6577                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
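                /* 3 is the D3hot encoding in PCI_PM_CTRL_STATE_MASK */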
6578                 pmcsr |= 3;
6579
6580                 if (bp->wol)
6581                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
6582
6583                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
6584                                       pmcsr);
6585
6586                 /* No more memory access after this point until
6587                  * the device is brought back to D0.
6588                  */
6589                 break;
6590
6591         default:
6592                 return -EINVAL;
6593         }
6594         return 0;
6595 }
6596
6597 /*
6598  * net_device service functions
6599  */
6600
6601 /* called with netif_tx_lock from set_multicast */
6602 static void bnx2x_set_rx_mode(struct net_device *dev)
6603 {
6604         struct bnx2x *bp = netdev_priv(dev);
6605         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6606
6607         DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
6608
6609         if (dev->flags & IFF_PROMISC)
6610                 rx_mode = BNX2X_RX_MODE_PROMISC;
6611
6612         else if ((dev->flags & IFF_ALLMULTI) ||
6613                  (dev->mc_count > BNX2X_MAX_MULTICAST))
6614                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6615
6616         else { /* some multicasts */
6617                 int i, old, offset;
6618                 struct dev_mc_list *mclist;
6619                 struct mac_configuration_cmd *config =
6620                                                 bnx2x_sp(bp, mcast_config);
6621
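                /* Each CAM entry takes the MAC as three byte-swapped
                 * 16-bit words; entries no longer in the list are
                 * invalidated below before posting the ramrod.
                 */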
6622                 for (i = 0, mclist = dev->mc_list;
6623                      mclist && (i < dev->mc_count);
6624                      i++, mclist = mclist->next) {
6625
6626                         config->config_table[i].cam_entry.msb_mac_addr =
6627                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
6628                         config->config_table[i].cam_entry.middle_mac_addr =
6629                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
6630                         config->config_table[i].cam_entry.lsb_mac_addr =
6631                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
6632                         config->config_table[i].cam_entry.flags =
6633                                                         cpu_to_le16(bp->port);
6634                         config->config_table[i].target_table_entry.flags = 0;
6635                         config->config_table[i].target_table_entry.
6636                                                                 client_id = 0;
6637                         config->config_table[i].target_table_entry.
6638                                                                 vlan_id = 0;
6639
6640                         DP(NETIF_MSG_IFUP,
6641                            "setting MCAST[%d] (%04x:%04x:%04x)\n",
6642                            i, config->config_table[i].cam_entry.msb_mac_addr,
6643                            config->config_table[i].cam_entry.middle_mac_addr,
6644                            config->config_table[i].cam_entry.lsb_mac_addr);
6645                 }
6646                 old = config->hdr.length_6b;
6647                 if (old > i) {
6648                         for (; i < old; i++) {
6649                                 if (CAM_IS_INVALID(config->config_table[i])) {
6650                                         i--; /* already invalidated */
6651                                         break;
6652                                 }
6653                                 /* invalidate */
6654                                 CAM_INVALIDATE(config->config_table[i]);
6655                         }
6656                 }
6657
6658                 if (CHIP_REV_IS_SLOW(bp))
6659                         offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
6660                 else
6661                         offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
6662
6663                 config->hdr.length_6b = i;
6664                 config->hdr.offset = offset;
6665                 config->hdr.reserved0 = 0;
6666                 config->hdr.reserved1 = 0;
6667
6668                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6669                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6670                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6671         }
6672
6673         bp->rx_mode = rx_mode;
6674         bnx2x_set_storm_rx_mode(bp);
6675 }
6676
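/* NAPI poll: refresh the fastpath status block index, service TX
 * completions and then RX work, and only re-enable the IGU interrupt
 * (via the two ack writes) once under budget with no work pending.
 */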
6677 static int bnx2x_poll(struct napi_struct *napi, int budget)
6678 {
6679         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
6680                                                  napi);
6681         struct bnx2x *bp = fp->bp;
6682         int work_done = 0;
6683
6684 #ifdef BNX2X_STOP_ON_ERROR
6685         if (unlikely(bp->panic))
6686                 goto out_panic;
6687 #endif
6688
6689         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
6690         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
6691         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
6692
6693         bnx2x_update_fpsb_idx(fp);
6694
6695         if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
6696                 bnx2x_tx_int(fp, budget);
6697
6698
6699         if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
6700                 work_done = bnx2x_rx_int(fp, budget);
6701
6702
6703         rmb(); /* bnx2x_has_work() reads the status block */
6704
6705         /* must not complete if we consumed full budget */
6706         if ((work_done < budget) && !bnx2x_has_work(fp)) {
6707
6708 #ifdef BNX2X_STOP_ON_ERROR
6709 out_panic:
6710 #endif
6711                 netif_rx_complete(bp->dev, napi);
6712
6713                 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
6714                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
6715                 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
6716                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
6717         }
6718
6719         return work_done;
6720 }
6721
6722 /* Called with netif_tx_lock.
6723  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
6724  * netif_wake_queue().
6725  */
6726 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
6727 {
6728         struct bnx2x *bp = netdev_priv(dev);
6729         struct bnx2x_fastpath *fp;
6730         struct sw_tx_bd *tx_buf;
6731         struct eth_tx_bd *tx_bd;
6732         struct eth_tx_parse_bd *pbd = NULL;
6733         u16 pkt_prod, bd_prod;
6734         int nbd, fp_index = 0;
6735         dma_addr_t mapping;
6736
6737 #ifdef BNX2X_STOP_ON_ERROR
6738         if (unlikely(bp->panic))
6739                 return NETDEV_TX_BUSY;
6740 #endif
6741
6742         fp_index = smp_processor_id() % (bp->num_queues);
6743
6744         fp = &bp->fp[fp_index];
6745         if (unlikely(bnx2x_tx_avail(fp) <
6746                                         (skb_shinfo(skb)->nr_frags + 3))) {
6747                 bp->slowpath->eth_stats.driver_xoff++;
6748                 netif_stop_queue(dev);
6749                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
6750                 return NETDEV_TX_BUSY;
6751         }
6752
6753         /*
6754          * This is a bit ugly: first we use one BD which we mark as start,
6755          * then for TSO or checksum offload we have a parsing info BD,
6756          * and only then the rest of the TSO BDs.
6757          * (Don't forget to mark the last one as last,
6758          * and to unmap only AFTER you write to the BD ...)
6759          * I would like to thank DovH for this mess.
6760          */
6761
6762         pkt_prod = fp->tx_pkt_prod++;
6763         bd_prod = fp->tx_bd_prod;
6764         bd_prod = TX_BD(bd_prod);
6765
6766         /* get a tx_buff and first bd */
6767         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
6768         tx_bd = &fp->tx_desc_ring[bd_prod];
6769
6770         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
6771         tx_bd->general_data = (UNICAST_ADDRESS <<
6772                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
6773         tx_bd->general_data |= 1; /* header nbd */
6774
6775         /* remember the first bd of the packet */
6776         tx_buf->first_bd = bd_prod;
6777
6778         DP(NETIF_MSG_TX_QUEUED,
6779            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
6780            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
6781
6782         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6783                 struct iphdr *iph = ip_hdr(skb);
6784                 u8 len;
6785
6786                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
6787
6788                 /* turn on parsing and get a bd */
6789                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6790                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
6791                 len = ((u8 *)iph - (u8 *)skb->data) / 2;
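                /* header lengths in the parsing BD are counted in
                 * 16-bit words, hence the divide by 2 */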
6792
6793                 /* for now NS flag is not used in Linux */
6794                 pbd->global_data = (len |
6795                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
6796                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
6797                 pbd->ip_hlen = ip_hdrlen(skb) / 2;
6798                 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
6799                 if (iph->protocol == IPPROTO_TCP) {
6800                         struct tcphdr *th = tcp_hdr(skb);
6801
6802                         tx_bd->bd_flags.as_bitfield |=
6803                                                 ETH_TX_BD_FLAGS_TCP_CSUM;
6804                         pbd->tcp_flags = pbd_tcp_flags(skb);
6805                         pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
6806                         pbd->tcp_pseudo_csum = swab16(th->check);
6807
6808                 } else if (iph->protocol == IPPROTO_UDP) {
6809                         struct udphdr *uh = udp_hdr(skb);
6810
6811                         tx_bd->bd_flags.as_bitfield |=
6812                                                 ETH_TX_BD_FLAGS_TCP_CSUM;
6813                         pbd->total_hlen += cpu_to_le16(4);
6814                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
6815                         pbd->cs_offset = 5; /* 10 >> 1 */
6816                         pbd->tcp_pseudo_csum = 0;
6817                         /* HW bug: we need to subtract 10 bytes before the
6818                          * UDP header from the csum
6819                          */
6820                         uh->check = (u16) ~csum_fold(csum_sub(uh->check,
6821                                 csum_partial(((u8 *)(uh)-10), 10, 0)));
6822                 }
6823         }
6824
6825         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
6826                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
6827                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
6828         } else {
6829                 tx_bd->vlan = cpu_to_le16(pkt_prod);
6830         }
6831
6832         mapping = pci_map_single(bp->pdev, skb->data,
6833                                  skb->len, PCI_DMA_TODEVICE);
6834
6835         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6836         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6837         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
6838         tx_bd->nbd = cpu_to_le16(nbd);
6839         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
6840
6841         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
6842            "  nbytes %d  flags %x  vlan %u\n",
6843            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
6844            tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
6845
6846         if (skb_shinfo(skb)->gso_size &&
6847             (skb->len > (bp->dev->mtu + ETH_HLEN))) {
6848                 int hlen;
6849
                     /* gso_size on a CHECKSUM_PARTIAL skb means a parsing bd
                      * was set up above; check it before it is dereferenced
                      */
                     if (unlikely(pbd == NULL)) {
                             BNX2X_ERR("LSO with no PBD\n");
                             BUG();
                     }
                     hlen = 2 * le16_to_cpu(pbd->total_hlen);

6850                 DP(NETIF_MSG_TX_QUEUED,
6851                    "TSO packet len %d  hlen %d  head len %d  tso size %d\n",
6852                    skb->len, hlen, skb_headlen(skb),
6853                    skb_shinfo(skb)->gso_size);
6854
6855                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
6856
6857                 if (le16_to_cpu(tx_bd->nbytes) > hlen) {
6858                         /* we split the first bd into headers and data bds
6859                          * to ease the pain of our fellow microcode engineers;
6860                          * we use one mapping for both bds.
6861                          * So far this has only been observed to happen
6862                          * in Other Operating Systems(TM)
6863                          */
6864
6865                         /* first fix first bd */
6866                         nbd++;
6867                         tx_bd->nbd = cpu_to_le16(nbd);
6868                         tx_bd->nbytes = cpu_to_le16(hlen);
6869
6870                         /* we only print this as an error
6871                          * because we don't think this will ever happen.
6872                          */
6873                         BNX2X_ERR("TSO split header size is %d (%x:%x)"
6874                                   "  nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
6875                                   tx_bd->addr_lo, tx_bd->nbd);
6876
6877                         /* now get a new data bd
6878                          * (after the pbd) and fill it */
6879                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6880                         tx_bd = &fp->tx_desc_ring[bd_prod];
6881
6882                         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6883                         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
6884                         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
6885                         tx_bd->vlan = cpu_to_le16(pkt_prod);
6886                         /* this marks the bd as one that has no
6887                          * individual mapping; the FW ignores this flag
6888                          * in a bd not marked start
6889                          */
6890                         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
6891                         DP(NETIF_MSG_TX_QUEUED,
6892                            "TSO split data size is %d (%x:%x)\n",
6893                            tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
6894                 }
6903
6904                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
6905                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
6906                 pbd->ip_id = swab16(ip_hdr(skb)->id);
6907                 pbd->tcp_pseudo_csum =
6908                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
6909                                                           ip_hdr(skb)->daddr,
6910                                                           0, IPPROTO_TCP, 0));
6911                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
6912         }
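
             /* note on the TSO block above: tcp_pseudo_csum is seeded with a
              * pseudo-header checksum computed over a zero length, since the
              * FW patches in each segment's length itself -- which is what
              * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN advertises; conceptually
              * (sketch only, not driver code):
              *
              *      seed = csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0);
              *                                            ^-- zero length
              *      pbd->tcp_pseudo_csum = swab16(~seed);
              */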
6913
6914         {
6915                 int i;
6916
6917                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6918                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6919
6920                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6921                         tx_bd = &fp->tx_desc_ring[bd_prod];
6922
6923                         mapping = pci_map_page(bp->pdev, frag->page,
6924                                                frag->page_offset,
6925                                                frag->size, PCI_DMA_TODEVICE);
6926
6927                         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6928                         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6929                         tx_bd->nbytes = cpu_to_le16(frag->size);
6930                         tx_bd->vlan = cpu_to_le16(pkt_prod);
6931                         tx_bd->bd_flags.as_bitfield = 0;
6932                         DP(NETIF_MSG_TX_QUEUED, "frag %d  bd @%p"
6933                            "  addr (%x:%x)  nbytes %d  flags %x\n",
6934                            i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
6935                            tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
6936                 } /* for */
6937         }
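
             /* at this point nbd counts every bd this packet occupies: the
              * start bd, a parsing bd when checksum offload/TSO set one up,
              * an extra data bd when the TSO header split above fired, and
              * one bd per page fragment from the loop above
              */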
6938
6939         /* now at last mark the bd as the last bd */
6940         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
6941
6942         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
6943            tx_bd, tx_bd->bd_flags.as_bitfield);
6944
6945         tx_buf->skb = skb;
6946
6947         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6948
6949         /* now send a tx doorbell, counting the next-page bd
6950          * if this packet wrapped past the end of a bd page
6951          */
6952         if (TX_BD_POFF(bd_prod) < nbd)
6953                 nbd++;
6954
6955         if (pbd)
6956                 DP(NETIF_MSG_TX_QUEUED,
6957                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
6958                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
6959                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
6960                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
6961                    pbd->tcp_send_seq, pbd->total_hlen);
6962
6963         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u  bd %d\n", nbd, bd_prod);
6964
6965         fp->hw_tx_prods->bds_prod =
6966                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
6967         mb(); /* FW restriction: must not reorder writing nbd and packets */
6968         fp->hw_tx_prods->packets_prod =
6969                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
6970         DOORBELL(bp, fp_index, 0);
6971
6972         mmiowb(); /* order the doorbell write before the tx lock is released */
6973
6974         fp->tx_bd_prod = bd_prod;
6975         dev->trans_start = jiffies;
6976
6977         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
6978                 netif_stop_queue(dev);
6979                 bp->slowpath->eth_stats.driver_xoff++;
6980                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
6981                         netif_wake_queue(dev);
6982         }
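
             /* note: the stop-then-recheck pattern above closes a race with
              * the tx completion path -- if bnx2x_tx_int() freed enough bds
              * between the availability test and netif_stop_queue(), the
              * immediate recheck wakes the queue instead of leaving it
              * stopped for good
              */
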
6983         fp->tx_pkt++;
6984
6985         return NETDEV_TX_OK;
6986 }
6987
6988 /* Called with rtnl_lock */
6989 static int bnx2x_open(struct net_device *dev)
6990 {
6991         struct bnx2x *bp = netdev_priv(dev);
6992
6993         bnx2x_set_power_state(bp, PCI_D0);
6994
6995         return bnx2x_nic_load(bp, 1);
6996 }
6997
6998 /* Called with rtnl_lock */
6999 static int bnx2x_close(struct net_device *dev)
7000 {
7001         struct bnx2x *bp = netdev_priv(dev);
7002
7003         /* Unload the driver, release IRQs */
7004         bnx2x_nic_unload(bp, 1);
7005
7006         if (!CHIP_REV_IS_SLOW(bp))
7007                 bnx2x_set_power_state(bp, PCI_D3hot);
7008
7009         return 0;
7010 }
7011
7012 /* Called with rtnl_lock */
7013 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
7014 {
7015         struct sockaddr *addr = p;
7016         struct bnx2x *bp = netdev_priv(dev);
7017
7018         if (!is_valid_ether_addr(addr->sa_data))
7019                 return -EINVAL;
7020
7021         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7022         if (netif_running(dev))
7023                 bnx2x_set_mac_addr(bp);
7024
7025         return 0;
7026 }
7027
7028 /* called with rtnl_lock */
7029 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7030 {
7031         struct mii_ioctl_data *data = if_mii(ifr);
7032         struct bnx2x *bp = netdev_priv(dev);
7033         int err;
7034
7035         switch (cmd) {
7036         case SIOCGMIIPHY:
7037                 data->phy_id = bp->phy_addr;
7038
7039                 /* fallthrough */
7040
7041         case SIOCGMIIREG: {
7042                 u16 mii_regval;
7043
7044                 if (!netif_running(dev))
7045                         return -EAGAIN;
7046
7047                 mutex_lock(&bp->phy_mutex);
7048                 err = bnx2x_cl45_read(bp, bp->port, 0, bp->phy_addr,
7049                                       DEFAULT_PHY_DEV_ADDR,
7050                                       (data->reg_num & 0x1f), &mii_regval);
7051                 data->val_out = mii_regval;
7052                 mutex_unlock(&bp->phy_mutex);
7053                 return err;
7054         }
7055
7056         case SIOCSMIIREG:
7057                 if (!capable(CAP_NET_ADMIN))
7058                         return -EPERM;
7059
7060                 if (!netif_running(dev))
7061                         return -EAGAIN;
7062
7063                 mutex_lock(&bp->phy_mutex);
7064                 err = bnx2x_cl45_write(bp, bp->port, 0, bp->phy_addr,
7065                                        DEFAULT_PHY_DEV_ADDR,
7066                                        (data->reg_num & 0x1f), data->val_in);
7067                 mutex_unlock(&bp->phy_mutex);
7068                 return err;
7069
7070         default:
7071                 /* do nothing */
7072                 break;
7073         }
7074
7075         return -EOPNOTSUPP;
7076 }
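
/* for reference, userspace reaches the MII ioctls above roughly like this
 * (an illustrative sketch in the usual mii-tool style; not driver code,
 * and "eth0"/"sock" are placeholders):
 *
 *      struct ifreq ifr;
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ioctl(sock, SIOCGMIIPHY, &ifr);         (fills mii->phy_id)
 *      mii->reg_num = 1;                       (e.g. MII status register)
 *      ioctl(sock, SIOCGMIIREG, &ifr);         (result in mii->val_out)
 */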
7077
7078 /* Called with rtnl_lock */
7079 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
7080 {
7081         struct bnx2x *bp = netdev_priv(dev);
7082
7083         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
7084             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
7085                 return -EINVAL;
7086
7087         /* This does not race with packet allocation
7088          * because the actual alloc size is
7089          * only updated as part of load
7090          */
7091         dev->mtu = new_mtu;
7092
7093         if (netif_running(dev)) {
7094                 bnx2x_nic_unload(bp, 0);
7095                 bnx2x_nic_load(bp, 0);
7096         }
7097         return 0;
7098 }
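
/* e.g. "ip link set dev eth0 mtu 9000" lands here; on a running interface
 * the new size only takes effect through the unload/load cycle above,
 * which is where the rx buffer allocation size is recomputed
 */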
7099
7100 static void bnx2x_tx_timeout(struct net_device *dev)
7101 {
7102         struct bnx2x *bp = netdev_priv(dev);
7103
7104 #ifdef BNX2X_STOP_ON_ERROR
7105         if (!bp->panic)
7106                 bnx2x_panic();
7107 #endif
7108         /* This allows the netif to be shutdown gracefully before resetting */
7109         schedule_work(&bp->reset_task);
7110 }
7111
7112 #ifdef BCM_VLAN
7113 /* Called with rtnl_lock */
7114 static void bnx2x_vlan_rx_register(struct net_device *dev,
7115                                    struct vlan_group *vlgrp)
7116 {
7117         struct bnx2x *bp = netdev_priv(dev);
7118
7119         bp->vlgrp = vlgrp;
7120         if (netif_running(dev))
7121                 bnx2x_set_client_config(bp);
7122 }
7123 #endif
7124
7125 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7126 static void poll_bnx2x(struct net_device *dev)
7127 {
7128         struct bnx2x *bp = netdev_priv(dev);
7129
7130         disable_irq(bp->pdev->irq);
7131         bnx2x_interrupt(bp->pdev->irq, dev);
7132         enable_irq(bp->pdev->irq);
7133 }
7134 #endif
7135
7136 static void bnx2x_reset_task(struct work_struct *work)
7137 {
7138         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7139
7140 #ifdef BNX2X_STOP_ON_ERROR
7141         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
7142                   " so the reset is skipped to allow a debug dump.\n"
7143          KERN_ERR " You will need to reboot when done\n");
7144         return;
7145 #endif
7146
7147         if (!netif_running(bp->dev))
7148                 return;
7149
7150         rtnl_lock();
7151
7152         if (bp->state != BNX2X_STATE_OPEN) {
7153                 DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
7154                 goto reset_task_exit;
7155         }
7156
7157         bnx2x_nic_unload(bp, 0);
7158         bnx2x_nic_load(bp, 0);
7159
7160 reset_task_exit:
7161         rtnl_unlock();
7162 }
7163
7164 static int __devinit bnx2x_init_board(struct pci_dev *pdev,
7165                                       struct net_device *dev)
7166 {
7167         struct bnx2x *bp;
7168         int rc;
7169
7170         SET_NETDEV_DEV(dev, &pdev->dev);
7171         bp = netdev_priv(dev);
7172
7173         bp->flags = 0;
7174         bp->port = PCI_FUNC(pdev->devfn);
7175
7176         rc = pci_enable_device(pdev);
7177         if (rc) {
7178                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
7179                 goto err_out;
7180         }
7181
7182         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7183                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
7184                        " aborting\n");
7185                 rc = -ENODEV;
7186                 goto err_out_disable;
7187         }
7188
7189         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7190                 printk(KERN_ERR PFX "Cannot find second PCI device"
7191                        " base address, aborting\n");
7192                 rc = -ENODEV;
7193                 goto err_out_disable;
7194         }
7195
7196         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7197         if (rc) {
7198                 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
7199                        " aborting\n");
7200                 goto err_out_disable;
7201         }
7202
7203         pci_set_master(pdev);
7204
7205         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7206         if (bp->pm_cap == 0) {
7207                 printk(KERN_ERR PFX "Cannot find power management"
7208                        " capability, aborting\n");
7209                 rc = -EIO;
7210                 goto err_out_release;
7211         }
7212
7213         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7214         if (bp->pcie_cap == 0) {
7215                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
7216                        " aborting\n");
7217                 rc = -EIO;
7218                 goto err_out_release;
7219         }
7220
7221         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
7222                 bp->flags |= USING_DAC_FLAG;
7223                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
7224                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
7225                                " failed, aborting\n");
7226                         rc = -EIO;
7227                         goto err_out_release;
7228                 }
7229
7230         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
7231                 printk(KERN_ERR PFX "System does not support DMA,"
7232                        " aborting\n");
7233                 rc = -EIO;
7234                 goto err_out_release;
7235         }
7236
7237         bp->dev = dev;
7238         bp->pdev = pdev;
7239
7240         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7241         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7242
7243         dev->base_addr = pci_resource_start(pdev, 0);
7244
7245         dev->irq = pdev->irq;
7246
7247         bp->regview = ioremap_nocache(dev->base_addr,
7248                                       pci_resource_len(pdev, 0));
7249         if (!bp->regview) {
7250                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
7251                 rc = -ENOMEM;
7252                 goto err_out_release;
7253         }
7254
7255         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7256                                         pci_resource_len(pdev, 2));
7257         if (!bp->doorbells) {
7258                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
7259                 rc = -ENOMEM;
7260                 goto err_out_unmap;
7261         }
7262
7263         bnx2x_set_power_state(bp, PCI_D0);
7264
7265         bnx2x_get_hwinfo(bp);
7266
7267         if (CHIP_REV(bp) == CHIP_REV_FPGA) {
7268                 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
7269                        " will only init first device\n");
7270                 onefunc = 1;
7271                 nomcp = 1;
7272         }
7273
7274         if (nomcp) {
7275                 printk(KERN_ERR PFX "MCP disabled, will only"
7276                        " init first device\n");
7277                 onefunc = 1;
7278         }
7279
7280         if (onefunc && bp->port) {
7281                 printk(KERN_ERR PFX "Second device disabled, exiting\n");
7282                 rc = -ENODEV;
7283                 goto err_out_unmap;
7284         }
7285
7286         bp->tx_ring_size = MAX_TX_AVAIL;
7287         bp->rx_ring_size = MAX_RX_AVAIL;
7288
7289         bp->rx_csum = 1;
7290
7291         bp->rx_offset = 0;
7292
7293         bp->tx_quick_cons_trip_int = 0xff;
7294         bp->tx_quick_cons_trip = 0xff;
7295         bp->tx_ticks_int = 50;
7296         bp->tx_ticks = 50;
7297
7298         bp->rx_quick_cons_trip_int = 0xff;
7299         bp->rx_quick_cons_trip = 0xff;
7300         bp->rx_ticks_int = 25;
7301         bp->rx_ticks = 25;
7302
7303         bp->stats_ticks = 1000000 & 0xffff00;
7304
7305         bp->timer_interval = HZ;
7306         bp->current_interval = (poll ? poll : HZ);
7307
7308         init_timer(&bp->timer);
7309         bp->timer.expires = jiffies + bp->current_interval;
7310         bp->timer.data = (unsigned long) bp;
7311         bp->timer.function = bnx2x_timer;
7312
7313         return 0;
7314
7315 err_out_unmap:
7316         if (bp->regview) {
7317                 iounmap(bp->regview);
7318                 bp->regview = NULL;
7319         }
7320
7321         if (bp->doorbells) {
7322                 iounmap(bp->doorbells);
7323                 bp->doorbells = NULL;
7324         }
7325
7326 err_out_release:
7327         pci_release_regions(pdev);
7328
7329 err_out_disable:
7330         pci_disable_device(pdev);
7331         pci_set_drvdata(pdev, NULL);
7332
7333 err_out:
7334         return rc;
7335 }
7336
7337 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
7338 {
7339         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7340
7341         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7342         return val;
7343 }
7344
7345 /* return value: 1 = 2.5GT/s, 2 = 5GT/s (Gen2) */
7346 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
7347 {
7348         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7349
7350         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7351         return val;
7352 }
7353
7354 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7355                                     const struct pci_device_id *ent)
7356 {
7357         static int version_printed;
7358         struct net_device *dev = NULL;
7359         struct bnx2x *bp;
7360         int rc;
7361         int port = PCI_FUNC(pdev->devfn);
7362         DECLARE_MAC_BUF(mac);
7363
7364         if (version_printed++ == 0)
7365                 printk(KERN_INFO "%s", version);
7366
7367         /* dev zeroed in init_etherdev */
7368         dev = alloc_etherdev(sizeof(*bp));
7369         if (!dev)
7370                 return -ENOMEM;
7371
7372         netif_carrier_off(dev);
7373
7374         bp = netdev_priv(dev);
7375         bp->msglevel = debug;
7376
7377         if (port && onefunc) {
7378                 printk(KERN_ERR PFX "Second function disabled, exiting\n");
7379                 free_netdev(dev);
7380                 return 0;
7381         }
7382
7383         rc = bnx2x_init_board(pdev, dev);
7384         if (rc < 0) {
7385                 free_netdev(dev);
7386                 return rc;
7387         }
7388
7389         dev->hard_start_xmit = bnx2x_start_xmit;
7390         dev->watchdog_timeo = TX_TIMEOUT;
7391
7392         dev->ethtool_ops = &bnx2x_ethtool_ops;
7393         dev->open = bnx2x_open;
7394         dev->stop = bnx2x_close;
7395         dev->set_multicast_list = bnx2x_set_rx_mode;
7396         dev->set_mac_address = bnx2x_change_mac_addr;
7397         dev->do_ioctl = bnx2x_ioctl;
7398         dev->change_mtu = bnx2x_change_mtu;
7399         dev->tx_timeout = bnx2x_tx_timeout;
7400 #ifdef BCM_VLAN
7401         dev->vlan_rx_register = bnx2x_vlan_rx_register;
7402 #endif
7403 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7404         dev->poll_controller = poll_bnx2x;
7405 #endif
7406         dev->features |= NETIF_F_SG;
7407         if (bp->flags & USING_DAC_FLAG)
7408                 dev->features |= NETIF_F_HIGHDMA;
7409         dev->features |= NETIF_F_IP_CSUM;
7410 #ifdef BCM_VLAN
7411         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7412 #endif
7413         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7414
7415         rc = register_netdev(dev);
7416         if (rc) {
7417                 dev_err(&pdev->dev, "Cannot register net device\n");
7418                 if (bp->regview)
7419                         iounmap(bp->regview);
7420                 if (bp->doorbells)
7421                         iounmap(bp->doorbells);
7422                 pci_release_regions(pdev);
7423                 pci_disable_device(pdev);
7424                 pci_set_drvdata(pdev, NULL);
7425                 free_netdev(dev);
7426                 return rc;
7427         }
7428
7429         pci_set_drvdata(pdev, dev);
7430
7431         bp->name = board_info[ent->driver_data].name;
7432         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
7433                " IRQ %d, ", dev->name, bp->name,
7434                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7435                ((CHIP_ID(bp) & 0x0ff0) >> 4),
7436                bnx2x_get_pcie_width(bp),
7437                (bnx2x_get_pcie_speed(bp) == 2) ? "5GT/s (Gen2)" : "2.5GT/s",
7438                dev->base_addr, bp->pdev->irq);
7439         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
7440         return 0;
7441 }
7442
7443 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7444 {
7445         struct net_device *dev = pci_get_drvdata(pdev);
7446         struct bnx2x *bp;
7447
7448         if (!dev) {
7449                 /* we get here if init_one() fails */
7450                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
7451                 return;
7452         }
7453
7454         bp = netdev_priv(dev);
7455
7456         unregister_netdev(dev);
7457
7458         if (bp->regview)
7459                 iounmap(bp->regview);
7460
7461         if (bp->doorbells)
7462                 iounmap(bp->doorbells);
7463
7464         free_netdev(dev);
7465         pci_release_regions(pdev);
7466         pci_disable_device(pdev);
7467         pci_set_drvdata(pdev, NULL);
7468 }
7469
7470 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
7471 {
7472         struct net_device *dev = pci_get_drvdata(pdev);
7473         struct bnx2x *bp;
7474
7475         if (!dev)
7476                 return 0;
7477
7478         if (!netif_running(dev))
7479                 return 0;
7480
7481         bp = netdev_priv(dev);
7482
7483         bnx2x_nic_unload(bp, 0);
7484
7485         netif_device_detach(dev);
7486
7487         pci_save_state(pdev);
7488         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
7489
7490         return 0;
7491 }
7492
7493 static int bnx2x_resume(struct pci_dev *pdev)
7494 {
7495         struct net_device *dev = pci_get_drvdata(pdev);
7496         struct bnx2x *bp;
7497         int rc;
7498
7499         if (!dev) {
7500                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
7501                 return -ENODEV;
7502         }
7503
7504         if (!netif_running(dev))
7505                 return 0;
7506
7507         bp = netdev_priv(dev);
7508
7509         pci_restore_state(pdev);
7510         bnx2x_set_power_state(bp, PCI_D0);
7511         netif_device_attach(dev);
7512
7513         rc = bnx2x_nic_load(bp, 0);
7514         if (rc)
7515                 return rc;
7516
7517         return 0;
7518 }
7519
7520 static struct pci_driver bnx2x_pci_driver = {
7521         .name       = DRV_MODULE_NAME,
7522         .id_table   = bnx2x_pci_tbl,
7523         .probe      = bnx2x_init_one,
7524         .remove     = __devexit_p(bnx2x_remove_one),
7525         .suspend    = bnx2x_suspend,
7526         .resume     = bnx2x_resume,
7527 };
7528
7529 static int __init bnx2x_init(void)
7530 {
7531         return pci_register_driver(&bnx2x_pci_driver);
7532 }
7533
7534 static void __exit bnx2x_cleanup(void)
7535 {
7536         pci_unregister_driver(&bnx2x_pci_driver);
7537 }
7538
7539 module_init(bnx2x_init);
7540 module_exit(bnx2x_cleanup);
7541