/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

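/* Usage note (added commentary, not in the original source): these
 * parameters are set at module load time, e.g.
 *	modprobe bnx2x disable_tpa=1 use_inta=1
 * to run with TPA off and a single legacy interrupt instead of MSI-X.
 */
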
#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

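/* Added commentary (not in the original source): the two helpers above
 * tunnel GRC register accesses through the PCI config-space window, so
 * they work before BAR-based REG_RD()/REG_WR() access is safe, e.g.:
 *
 *	bnx2x_reg_wr_ind(bp, dst_addr + i*4, data[i]);
 *	val = bnx2x_reg_rd_ind(bp, src_addr + i*4);
 *
 * Re-pointing the window at PCICFG_VENDOR_ID_OFFSET afterwards parks it
 * on a harmless address.
 */
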
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

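/* Added commentary (not in the original source): DMAE command memory is
 * an array of struct dmae_command cells, one per channel; the loop above
 * copies the command into cell "idx" word by word, then writing 1 to
 * dmae_reg_go_c[idx] kicks that channel.
 */
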
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

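/* Usage sketch (added commentary, not in the original source): callers
 * normally stage the data in the slowpath write-back buffer and go
 * through a wrapper macro, roughly:
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), wb_write, 2*4);
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, 2);
 *
 * Completion is detected by polling the wb_comp word, which the DMAE
 * block writes back with DMAE_COMP_VAL.
 */
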
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

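/* Usage sketch (added commentary, not in the original source): after
 * bnx2x_read_dmae() returns, the len32 words read from GRC space sit in
 * the slowpath write-back buffer, e.g. for a 64-bit counter:
 *
 *	bnx2x_read_dmae(bp, reg, 2);
 *	val = HILO_U64(*bnx2x_sp(bp, wb_data[0]),
 *		       *bnx2x_sp(bp, wb_data[1]));
 */
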
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

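/* Added commentary (not in the original source): "wide-bus" (wb)
 * registers are 64 bits wide and are accessed as one two-dword DMAE
 * transfer, hence the {val_hi, val_lo} pair above instead of two
 * independent REG_WR() calls.
 */
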
#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

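/* Added commentary (not in the original source): the MCP keeps a cyclic
 * ASCII trace buffer in its scratchpad; the word at scratch + 0xf104 is
 * the current write mark expressed as a 0x08000000-based GRC address, so
 * "mark - 0x08000000" converts it back to a scratchpad offset. The two
 * loops then print the ring oldest half first.
 */
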
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

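/* Added commentary (not in the original source): this is the usual
 * quiesce pattern - bump intr_sem so new ISR invocations bail out early,
 * optionally mask the HC, synchronize_irq() every vector so in-flight
 * handlers finish, then cancel/flush the slowpath work so nothing
 * re-arms behind our back.
 */
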
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

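/* Usage sketch (added commentary, not in the original source): the NAPI
 * poll path acks the last consumed status-block index and re-enables the
 * interrupt line with something like:
 *
 *	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
 *		     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */
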
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

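/* Worked example (added commentary, not in the original source): with
 * prod == cons the ring is empty, used == NUM_TX_RINGS, and the function
 * returns tx_ring_size - NUM_TX_RINGS: the "next page" BD at the end of
 * each ring page is accounted as permanently used.
 */
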
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

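/* Added commentary (not in the original source): the "command | state"
 * switch above relies on the ramrod command ids and the state values
 * being defined on disjoint bit ranges in bnx2x.h, so each OR yields a
 * unique case label.
 */
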
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

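/* Added commentary (not in the original source): sge_mask is a bitmap
 * with one bit per SGE entry; bits are cleared as the FW consumes pages,
 * and the producer above only advances across 64-entry mask elements
 * that have gone completely to zero, re-arming each one to
 * RX_SGE_MASK_ELEM_ONE_MASK as it passes.
 */
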
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

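/* Added commentary (not in the original source): a TPA "bin" cycles
 * START -> STOP: on START the freshly posted skb is parked in
 * tpa_pool[queue] while the HW aggregates, and on STOP (see
 * bnx2x_tpa_stop() below) the aggregated frags are attached and the skb
 * is handed to the stack.
 */
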
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

1391 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1392 {
1393         struct bnx2x *bp = fp->bp;
1394         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1395         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1396         int rx_pkt = 0;
1397
1398 #ifdef BNX2X_STOP_ON_ERROR
1399         if (unlikely(bp->panic))
1400                 return 0;
1401 #endif
1402
1403         /* CQ "next element" is of the size of the regular element,
1404            that's why it's ok here */
1405         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1406         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1407                 hw_comp_cons++;
1408
1409         bd_cons = fp->rx_bd_cons;
1410         bd_prod = fp->rx_bd_prod;
1411         bd_prod_fw = bd_prod;
1412         sw_comp_cons = fp->rx_comp_cons;
1413         sw_comp_prod = fp->rx_comp_prod;
1414
1415         /* Memory barrier necessary as speculative reads of the rx
1416          * buffer can be ahead of the index in the status block
1417          */
1418         rmb();
1419
1420         DP(NETIF_MSG_RX_STATUS,
1421            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1422            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1423
1424         while (sw_comp_cons != hw_comp_cons) {
1425                 struct sw_rx_bd *rx_buf = NULL;
1426                 struct sk_buff *skb;
1427                 union eth_rx_cqe *cqe;
1428                 u8 cqe_fp_flags;
1429                 u16 len, pad;
1430
1431                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1432                 bd_prod = RX_BD(bd_prod);
1433                 bd_cons = RX_BD(bd_cons);
1434
1435                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1436                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1437
1438                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1439                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1440                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1441                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1442                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1443                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1444
1445                 /* is this a slowpath msg? */
1446                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1447                         bnx2x_sp_event(fp, cqe);
1448                         goto next_cqe;
1449
1450                 /* this is an rx packet */
1451                 } else {
1452                         rx_buf = &fp->rx_buf_ring[bd_cons];
1453                         skb = rx_buf->skb;
1454                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1455                         pad = cqe->fast_path_cqe.placement_offset;
1456
1457                         /* If CQE is marked both TPA_START and TPA_END
1458                            it is a non-TPA CQE */
1459                         if ((!fp->disable_tpa) &&
1460                             (TPA_TYPE(cqe_fp_flags) !=
1461                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1462                                 u16 queue = cqe->fast_path_cqe.queue_index;
1463
1464                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1465                                         DP(NETIF_MSG_RX_STATUS,
1466                                            "calling tpa_start on queue %d\n",
1467                                            queue);
1468
1469                                         bnx2x_tpa_start(fp, queue, skb,
1470                                                         bd_cons, bd_prod);
1471                                         goto next_rx;
1472                                 }
1473
1474                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1475                                         DP(NETIF_MSG_RX_STATUS,
1476                                            "calling tpa_stop on queue %d\n",
1477                                            queue);
1478
1479                                         if (!BNX2X_RX_SUM_FIX(cqe))
1480                                                 BNX2X_ERR("STOP on none TCP "
1481                                                           "data\n");
1482
1483                                         /* This is the size of the linear data
1484                                            on this skb */
1485                                         len = le16_to_cpu(cqe->fast_path_cqe.
1486                                                                 len_on_bd);
1487                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1488                                                     len, cqe, comp_ring_cons);
1489 #ifdef BNX2X_STOP_ON_ERROR
1490                                         if (bp->panic)
1491                                                 return -EINVAL;
1492 #endif
1493
1494                                         bnx2x_update_sge_prod(fp,
1495                                                         &cqe->fast_path_cqe);
1496                                         goto next_cqe;
1497                                 }
1498                         }
1499
1500                         pci_dma_sync_single_for_device(bp->pdev,
1501                                         pci_unmap_addr(rx_buf, mapping),
1502                                                        pad + RX_COPY_THRESH,
1503                                                        PCI_DMA_FROMDEVICE);
1504                         prefetch(skb);
1505                         prefetch(((char *)(skb)) + 128);
1506
1507                         /* is this an error packet? */
1508                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1509                                 DP(NETIF_MSG_RX_ERR,
1510                                    "ERROR  flags %x  rx packet %u\n",
1511                                    cqe_fp_flags, sw_comp_cons);
1512                                 bp->eth_stats.rx_err_discard_pkt++;
1513                                 goto reuse_rx;
1514                         }
1515
1516                         /* Since we don't have a jumbo ring,
1517                          * copy small packets if mtu > 1500
1518                          */
1519                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1520                             (len <= RX_COPY_THRESH)) {
1521                                 struct sk_buff *new_skb;
1522
1523                                 new_skb = netdev_alloc_skb(bp->dev,
1524                                                            len + pad);
1525                                 if (new_skb == NULL) {
1526                                         DP(NETIF_MSG_RX_ERR,
1527                                            "ERROR  packet dropped "
1528                                            "because of alloc failure\n");
1529                                         bp->eth_stats.rx_skb_alloc_failed++;
1530                                         goto reuse_rx;
1531                                 }
1532
1533                                 /* aligned copy */
1534                                 skb_copy_from_linear_data_offset(skb, pad,
1535                                                     new_skb->data + pad, len);
1536                                 skb_reserve(new_skb, pad);
1537                                 skb_put(new_skb, len);
1538
1539                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1540
1541                                 skb = new_skb;
1542
1543                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1544                                 pci_unmap_single(bp->pdev,
1545                                         pci_unmap_addr(rx_buf, mapping),
1546                                                  bp->rx_buf_size,
1547                                                  PCI_DMA_FROMDEVICE);
1548                                 skb_reserve(skb, pad);
1549                                 skb_put(skb, len);
1550
1551                         } else {
1552                                 DP(NETIF_MSG_RX_ERR,
1553                                    "ERROR  packet dropped because "
1554                                    "of alloc failure\n");
1555                                 bp->eth_stats.rx_skb_alloc_failed++;
1556 reuse_rx:
1557                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1558                                 goto next_rx;
1559                         }
1560
1561                         skb->protocol = eth_type_trans(skb, bp->dev);
1562
1563                         skb->ip_summed = CHECKSUM_NONE;
1564                         if (bp->rx_csum) {
1565                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1566                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1567                                 else
1568                                         bp->eth_stats.hw_csum_err++;
1569                         }
1570                 }
1571
1572 #ifdef BCM_VLAN
1573                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1574                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1575                      PARSING_FLAGS_VLAN))
1576                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1577                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1578                 else
1579 #endif
1580                         netif_receive_skb(skb);
1581
1582
1583 next_rx:
1584                 rx_buf->skb = NULL;
1585
1586                 bd_cons = NEXT_RX_IDX(bd_cons);
1587                 bd_prod = NEXT_RX_IDX(bd_prod);
1588                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1589                 rx_pkt++;
1590 next_cqe:
1591                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1592                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1593
1594                 if (rx_pkt == budget)
1595                         break;
1596         } /* while */
1597
1598         fp->rx_bd_cons = bd_cons;
1599         fp->rx_bd_prod = bd_prod_fw;
1600         fp->rx_comp_cons = sw_comp_cons;
1601         fp->rx_comp_prod = sw_comp_prod;
1602
1603         /* Update producers */
1604         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1605                              fp->rx_sge_prod);
1606
1607         fp->rx_pkt += rx_pkt;
1608         fp->rx_calls++;
1609
1610         return rx_pkt;
1611 }
1612
1613 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1614 {
1615         struct bnx2x_fastpath *fp = fp_cookie;
1616         struct bnx2x *bp = fp->bp;
1617         int index = FP_IDX(fp);
1618
1619         /* Return here if interrupt is disabled */
1620         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1621                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1622                 return IRQ_HANDLED;
1623         }
1624
1625         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1626            index, FP_SB_ID(fp));
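        /* Ack the SB with IGU_INT_DISABLE so that no further interrupts
           arrive for this queue; they stay disabled until re-enabled
           (from the NAPI poll, in the usual NAPI scheme) */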
1627         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1628
1629 #ifdef BNX2X_STOP_ON_ERROR
1630         if (unlikely(bp->panic))
1631                 return IRQ_HANDLED;
1632 #endif
1633
1634         prefetch(fp->rx_cons_sb);
1635         prefetch(fp->tx_cons_sb);
1636         prefetch(&fp->status_blk->c_status_block.status_block_index);
1637         prefetch(&fp->status_blk->u_status_block.status_block_index);
1638
1639         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1640
1641         return IRQ_HANDLED;
1642 }
1643
1644 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1645 {
1646         struct net_device *dev = dev_instance;
1647         struct bnx2x *bp = netdev_priv(dev);
1648         u16 status = bnx2x_ack_int(bp);
1649         u16 mask;
1650
1651         /* Return here if interrupt is shared and it's not for us */
1652         if (unlikely(status == 0)) {
1653                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1654                 return IRQ_NONE;
1655         }
1656         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1657
1658         /* Return here if interrupt is disabled */
1659         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1660                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1661                 return IRQ_HANDLED;
1662         }
1663
1664 #ifdef BNX2X_STOP_ON_ERROR
1665         if (unlikely(bp->panic))
1666                 return IRQ_HANDLED;
1667 #endif
1668
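        /* The INT#A status word apparently carries one bit per status
           block: bit 0 is the default (slowpath) SB and each fastpath SB
           sets bit (sb_id + 1), hence the 0x2 shift below */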
1669         mask = 0x2 << bp->fp[0].sb_id;
1670         if (status & mask) {
1671                 struct bnx2x_fastpath *fp = &bp->fp[0];
1672
1673                 prefetch(fp->rx_cons_sb);
1674                 prefetch(fp->tx_cons_sb);
1675                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1676                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1677
1678                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1679
1680                 status &= ~mask;
1681         }
1682
1683
1684         if (unlikely(status & 0x1)) {
1685                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1686
1687                 status &= ~0x1;
1688                 if (!status)
1689                         return IRQ_HANDLED;
1690         }
1691
1692         if (status)
1693                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1694                    status);
1695
1696         return IRQ_HANDLED;
1697 }
1698
1699 /* end of fast path */
1700
1701 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1702
1703 /* Link */
1704
1705 /*
1706  * General service functions
1707  */
1708
1709 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1710 {
1711         u32 lock_status;
1712         u32 resource_bit = (1 << resource);
1713         int func = BP_FUNC(bp);
1714         u32 hw_lock_control_reg;
1715         int cnt;
1716
1717         /* Validating that the resource is within range */
1718         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1719                 DP(NETIF_MSG_HW,
1720                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1721                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1722                 return -EINVAL;
1723         }
1724
1725         if (func <= 5) {
1726                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1727         } else {
1728                 hw_lock_control_reg =
1729                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1730         }
1731
1732         /* Validating that the resource is not already taken */
1733         lock_status = REG_RD(bp, hw_lock_control_reg);
1734         if (lock_status & resource_bit) {
1735                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1736                    lock_status, resource_bit);
1737                 return -EEXIST;
1738         }
1739
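        /* The register at base + 4 acts as the "set" half of a set/clear
           pair: writing the resource bit there requests the lock, and
           reading the base register back shows which bits are actually
           owned (writing the base register clears them - see
           bnx2x_release_hw_lock()) */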
1740         /* Try for 5 seconds, polling every 5ms */
1741         for (cnt = 0; cnt < 1000; cnt++) {
1742                 /* Try to acquire the lock */
1743                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1744                 lock_status = REG_RD(bp, hw_lock_control_reg);
1745                 if (lock_status & resource_bit)
1746                         return 0;
1747
1748                 msleep(5);
1749         }
1750         DP(NETIF_MSG_HW, "Timeout\n");
1751         return -EAGAIN;
1752 }
1753
1754 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1755 {
1756         u32 lock_status;
1757         u32 resource_bit = (1 << resource);
1758         int func = BP_FUNC(bp);
1759         u32 hw_lock_control_reg;
1760
1761         /* Validating that the resource is within range */
1762         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1763                 DP(NETIF_MSG_HW,
1764                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1765                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1766                 return -EINVAL;
1767         }
1768
1769         if (func <= 5) {
1770                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1771         } else {
1772                 hw_lock_control_reg =
1773                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1774         }
1775
1776         /* Validating that the resource is currently taken */
1777         lock_status = REG_RD(bp, hw_lock_control_reg);
1778         if (!(lock_status & resource_bit)) {
1779                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1780                    lock_status, resource_bit);
1781                 return -EFAULT;
1782         }
1783
1784         REG_WR(bp, hw_lock_control_reg, resource_bit);
1785         return 0;
1786 }
1787
1788 /* HW Lock for shared dual port PHYs */
1789 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1790 {
1791         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1792
1793         mutex_lock(&bp->port.phy_mutex);
1794
1795         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1796             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1797                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1798 }
1799
1800 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1801 {
1802         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1803
1804         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1805             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1806                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1807
1808         mutex_unlock(&bp->port.phy_mutex);
1809 }
1810
1811 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1812 {
1813         /* The GPIO should be swapped if swap register is set and active */
1814         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1815                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1816         int gpio_shift = gpio_num +
1817                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1818         u32 gpio_mask = (1 << gpio_shift);
1819         u32 gpio_reg;
1820
1821         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1822                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1823                 return -EINVAL;
1824         }
1825
1826         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1827         /* read GPIO and mask out everything except the float bits */
1828         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1829
1830         switch (mode) {
1831         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1832                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1833                    gpio_num, gpio_shift);
1834                 /* clear FLOAT and set CLR */
1835                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1836                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1837                 break;
1838
1839         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1840                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1841                    gpio_num, gpio_shift);
1842                 /* clear FLOAT and set SET */
1843                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1844                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1845                 break;
1846
1847         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1848                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1849                    gpio_num, gpio_shift);
1850                 /* set FLOAT */
1851                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1852                 break;
1853
1854         default:
1855                 break;
1856         }
1857
1858         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1859         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1860
1861         return 0;
1862 }
1863
1864 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1865 {
1866         u32 spio_mask = (1 << spio_num);
1867         u32 spio_reg;
1868
1869         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1870             (spio_num > MISC_REGISTERS_SPIO_7)) {
1871                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1872                 return -EINVAL;
1873         }
1874
1875         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1876         /* read SPIO and mask out everything except the float bits */
1877         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1878
1879         switch (mode) {
1880         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1881                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1882                 /* clear FLOAT and set CLR */
1883                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1884                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1885                 break;
1886
1887         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1888                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1889                 /* clear FLOAT and set SET */
1890                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1891                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1892                 break;
1893
1894         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1895                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1896                 /* set FLOAT */
1897                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1898                 break;
1899
1900         default:
1901                 break;
1902         }
1903
1904         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1905         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1906
1907         return 0;
1908 }
1909
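/* Map the IEEE pause advertisement bits in link_vars into the ethtool
 * ADVERTISED_* pause flags */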
1910 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1911 {
1912         switch (bp->link_vars.ieee_fc &
1913                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1914         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1915                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1916                                           ADVERTISED_Pause);
1917                 break;
1918         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1919                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1920                                          ADVERTISED_Pause);
1921                 break;
1922         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1923                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1924                 break;
1925         default:
1926                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927                                           ADVERTISED_Pause);
1928                 break;
1929         }
1930 }
1931
1932 static void bnx2x_link_report(struct bnx2x *bp)
1933 {
1934         if (bp->link_vars.link_up) {
1935                 if (bp->state == BNX2X_STATE_OPEN)
1936                         netif_carrier_on(bp->dev);
1937                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1938
1939                 printk("%d Mbps ", bp->link_vars.line_speed);
1940
1941                 if (bp->link_vars.duplex == DUPLEX_FULL)
1942                         printk("full duplex");
1943                 else
1944                         printk("half duplex");
1945
1946                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1947                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1948                                 printk(", receive ");
1949                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1950                                         printk("& transmit ");
1951                         } else {
1952                                 printk(", transmit ");
1953                         }
1954                         printk("flow control ON");
1955                 }
1956                 printk("\n");
1957
1958         } else { /* link_down */
1959                 netif_carrier_off(bp->dev);
1960                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1961         }
1962 }
1963
1964 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1965 {
1966         if (!BP_NOMCP(bp)) {
1967                 u8 rc;
1968
1969                 /* Initialize link parameters structure variables */
1970                 /* It is recommended to turn off RX FC for jumbo frames
1971                    for better performance */
1972                 if (IS_E1HMF(bp))
1973                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1974                 else if (bp->dev->mtu > 5000)
1975                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1976                 else
1977                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1978
1979                 bnx2x_acquire_phy_lock(bp);
1980                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1981                 bnx2x_release_phy_lock(bp);
1982
1983                 bnx2x_calc_fc_adv(bp);
1984
1985                 if (bp->link_vars.link_up)
1986                         bnx2x_link_report(bp);
1987
1988
1989                 return rc;
1990         }
1991         BNX2X_ERR("Bootcode is missing -not initializing link\n");
1992         return -EINVAL;
1993 }
1994
1995 static void bnx2x_link_set(struct bnx2x *bp)
1996 {
1997         if (!BP_NOMCP(bp)) {
1998                 bnx2x_acquire_phy_lock(bp);
1999                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000                 bnx2x_release_phy_lock(bp);
2001
2002                 bnx2x_calc_fc_adv(bp);
2003         } else
2004                 BNX2X_ERR("Bootcode is missing -not setting link\n");
2005 }
2006
2007 static void bnx2x__link_reset(struct bnx2x *bp)
2008 {
2009         if (!BP_NOMCP(bp)) {
2010                 bnx2x_acquire_phy_lock(bp);
2011                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2012                 bnx2x_release_phy_lock(bp);
2013         } else
2014                 BNX2X_ERR("Bootcode is missing -not resetting link\n");
2015 }
2016
2017 static u8 bnx2x_link_test(struct bnx2x *bp)
2018 {
2019         u8 rc;
2020
2021         bnx2x_acquire_phy_lock(bp);
2022         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2023         bnx2x_release_phy_lock(bp);
2024
2025         return rc;
2026 }
2027
2028 /* Calculates the sum of vn_min_rates.
2029    It's needed for further normalizing of the min_rates.
2030
2031    Returns:
2032      sum of vn_min_rates
2033        or
2034      0 - if all the min_rates are 0.
2035      In the latter case the fairness algorithm should be deactivated.
2036      If not all min_rates are zero, then those that are zero will
2037      be set to 1.
2038  */
2039 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2040 {
2041         int i, port = BP_PORT(bp);
2042         u32 wsum = 0;
2043         int all_zero = 1;
2044
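        /* mf_cfg.func_mf_config[] is indexed by the absolute function id,
           which is (2 * vn + port) */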
2045         for (i = 0; i < E1HVN_MAX; i++) {
2046                 u32 vn_cfg =
2047                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2048                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2049                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2050                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2051                         /* If min rate is zero - set it to 1 */
2052                         if (!vn_min_rate)
2053                                 vn_min_rate = DEF_MIN_RATE;
2054                         else
2055                                 all_zero = 0;
2056
2057                         wsum += vn_min_rate;
2058                 }
2059         }
2060
2061         /* ... only if all min rates are zero - disable FAIRNESS */
2062         if (all_zero)
2063                 return 0;
2064
2065         return wsum;
2066 }
2067
2068 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2069                                    int en_fness,
2070                                    u16 port_rate,
2071                                    struct cmng_struct_per_port *m_cmng_port)
2072 {
2073         u32 r_param = port_rate / 8;
2074         int port = BP_PORT(bp);
2075         int i;
2076
2077         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2078
2079         /* Enable minmax only if we are in e1hmf mode */
2080         if (IS_E1HMF(bp)) {
2081                 u32 fair_periodic_timeout_usec;
2082                 u32 t_fair;
2083
2084                 /* Enable rate shaping and fairness */
2085                 m_cmng_port->flags.cmng_vn_enable = 1;
2086                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2087                 m_cmng_port->flags.rate_shaping_enable = 1;
2088
2089                 if (!en_fness)
2090                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2091                            "  fairness will be disabled\n");
2092
2093                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2094                 m_cmng_port->rs_vars.rs_periodic_timeout =
2095                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2096
2097                 /* this is the threshold below which no timer arming will occur;
2098                    the 1.25 coefficient makes the threshold a little bigger
2099                    than the real time, to compensate for timer inaccuracy */
2100                 m_cmng_port->rs_vars.rs_threshold =
2101                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2102
2103                 /* resolution of fairness timer */
2104                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2105                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2106                 t_fair = T_FAIR_COEF / port_rate;
2107
2108                 /* this is the threshold below which we won't arm
2109                    the timer anymore */
2110                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2111
2112                 /* we multiply by 1e3/8 to get bytes/msec.
2113                    We don't want the accumulated credit to exceed
2114                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2115                 m_cmng_port->fair_vars.upper_bound =
2116                                                 r_param * t_fair * FAIR_MEM;
2117                 /* since each tick is 4 usec */
2118                 m_cmng_port->fair_vars.fairness_timeout =
2119                                                 fair_periodic_timeout_usec / 4;
2120
2121         } else {
2122                 /* Disable rate shaping and fairness */
2123                 m_cmng_port->flags.cmng_vn_enable = 0;
2124                 m_cmng_port->flags.fairness_enable = 0;
2125                 m_cmng_port->flags.rate_shaping_enable = 0;
2126
2127                 DP(NETIF_MSG_IFUP,
2128                    "Single function mode  minmax will be disabled\n");
2129         }
2130
2131         /* Store it to internal memory */
2132         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2133                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2134                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2135                        ((u32 *)(m_cmng_port))[i]);
2136 }
2137
2138 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2139                                    u32 wsum, u16 port_rate,
2140                                  struct cmng_struct_per_port *m_cmng_port)
2141 {
2142         struct rate_shaping_vars_per_vn m_rs_vn;
2143         struct fairness_vars_per_vn m_fair_vn;
2144         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2145         u16 vn_min_rate, vn_max_rate;
2146         int i;
2147
2148         /* If function is hidden - set min and max to zeroes */
2149         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2150                 vn_min_rate = 0;
2151                 vn_max_rate = 0;
2152
2153         } else {
2154                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2155                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2156                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2157                    if current min rate is zero - set it to 1.
2158                    This is a requirement of the algorithm. */
2159                 if ((vn_min_rate == 0) && wsum)
2160                         vn_min_rate = DEF_MIN_RATE;
2161                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2162                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2163         }
2164
2165         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2166            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2167
2168         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2169         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2170
2171         /* global vn counter - maximal Mbps for this vn */
2172         m_rs_vn.vn_counter.rate = vn_max_rate;
2173
2174         /* quota - number of bytes transmitted in this period */
2175         m_rs_vn.vn_counter.quota =
2176                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
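        /* e.g. with the 100 usec period implied above, a vn_max_rate of
           10000 Mbps yields a quota of 10000 * 100 / 8 = 125000 bytes
           (Mbps * usec conveniently cancels to bits) */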
2177
2178 #ifdef BNX2X_PER_PROT_QOS
2179         /* per protocol counter */
2180         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2181                 /* maximal Mbps for this protocol */
2182                 m_rs_vn.protocol_counters[protocol].rate =
2183                                                 protocol_max_rate[protocol];
2184                 /* the quota in each timer period -
2185                    number of bytes transmitted in this period */
2186                 m_rs_vn.protocol_counters[protocol].quota =
2187                         (u32)(rs_periodic_timeout_usec *
2188                           ((double)m_rs_vn.
2189                                    protocol_counters[protocol].rate/8));
2190         }
2191 #endif
2192
2193         if (wsum) {
2194                 /* credit for each period of the fairness algorithm:
2195                    number of bytes in T_FAIR (the VNs share the port rate).
2196                    wsum should not be larger than 10000, thus
2197                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
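                /* e.g. two VNs with min rates 2500 and 7500 give
                   wsum = 10000, so they split the T_FAIR byte budget
                   25%/75% (subject to the fair_threshold * 2 lower bound
                   enforced by the max() below) */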
2198                 m_fair_vn.vn_credit_delta =
2199                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2200                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2201                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2202                    m_fair_vn.vn_credit_delta);
2203         }
2204
2205 #ifdef BNX2X_PER_PROT_QOS
2206         do {
2207                 u32 protocolWeightSum = 0;
2208
2209                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2210                         protocolWeightSum +=
2211                                         drvInit.protocol_min_rate[protocol];
2212                 /* per protocol counter -
2213                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2214                 if (protocolWeightSum > 0) {
2215                         for (protocol = 0;
2216                              protocol < NUM_OF_PROTOCOLS; protocol++)
2217                                 /* credit for each period of the
2218                                    fairness algorithm - number of bytes in
2219                                    T_FAIR (the protocols share the vn rate) */
2220                                 m_fair_vn.protocol_credit_delta[protocol] =
2221                                         (u32)((vn_min_rate / 8) * t_fair *
2222                                         protocol_min_rate / protocolWeightSum);
2223                 }
2224         } while (0);
2225 #endif
2226
2227         /* Store it to internal memory */
2228         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2229                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2230                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2231                        ((u32 *)(&m_rs_vn))[i]);
2232
2233         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2234                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2235                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2236                        ((u32 *)(&m_fair_vn))[i]);
2237 }
2238
2239 /* This function is called upon link interrupt */
2240 static void bnx2x_link_attn(struct bnx2x *bp)
2241 {
2242         int vn;
2243
2244         /* Make sure that we are synced with the current statistics */
2245         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2246
2247         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2248
2249         if (bp->link_vars.link_up) {
2250
2251                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2252                         struct host_port_stats *pstats;
2253
2254                         pstats = bnx2x_sp(bp, port_stats);
2255                         /* reset old bmac stats */
2256                         memset(&(pstats->mac_stx[0]), 0,
2257                                sizeof(struct mac_stx));
2258                 }
2259                 if ((bp->state == BNX2X_STATE_OPEN) ||
2260                     (bp->state == BNX2X_STATE_DISABLED))
2261                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2262         }
2263
2264         /* indicate link status */
2265         bnx2x_link_report(bp);
2266
2267         if (IS_E1HMF(bp)) {
2268                 int func;
2269
2270                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271                         if (vn == BP_E1HVN(bp))
2272                                 continue;
2273
2274                         func = ((vn << 1) | BP_PORT(bp));
2275
2276                         /* Set the attention towards other drivers
2277                            on the same port */
2278                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2279                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2280                 }
2281         }
2282
2283         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2284                 struct cmng_struct_per_port m_cmng_port;
2285                 u32 wsum;
2286                 int port = BP_PORT(bp);
2287
2288                 /* Init RATE SHAPING and FAIRNESS contexts */
2289                 wsum = bnx2x_calc_vn_wsum(bp);
2290                 bnx2x_init_port_minmax(bp, (int)wsum,
2291                                         bp->link_vars.line_speed,
2292                                         &m_cmng_port);
2293                 if (IS_E1HMF(bp))
2294                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2296                                         wsum, bp->link_vars.line_speed,
2297                                                      &m_cmng_port);
2298         }
2299 }
2300
2301 static void bnx2x__link_status_update(struct bnx2x *bp)
2302 {
2303         if (bp->state != BNX2X_STATE_OPEN)
2304                 return;
2305
2306         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2307
2308         if (bp->link_vars.link_up)
2309                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2310         else
2311                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2312
2313         /* indicate link status */
2314         bnx2x_link_report(bp);
2315 }
2316
2317 static void bnx2x_pmf_update(struct bnx2x *bp)
2318 {
2319         int port = BP_PORT(bp);
2320         u32 val;
2321
2322         bp->port.pmf = 1;
2323         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2324
2325         /* enable nig attention */
2326         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2327         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2328         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2329
2330         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2331 }
2332
2333 /* end of Link */
2334
2335 /* slow path */
2336
2337 /*
2338  * General service functions
2339  */
2340
2341 /* the slow path queue is odd since completions arrive on the fastpath ring */
2342 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2343                          u32 data_hi, u32 data_lo, int common)
2344 {
2345         int func = BP_FUNC(bp);
2346
2347         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2348            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2349            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2350            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2351            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2352
2353 #ifdef BNX2X_STOP_ON_ERROR
2354         if (unlikely(bp->panic))
2355                 return -EIO;
2356 #endif
2357
2358         spin_lock_bh(&bp->spq_lock);
2359
2360         if (!bp->spq_left) {
2361                 BNX2X_ERR("BUG! SPQ ring full!\n");
2362                 spin_unlock_bh(&bp->spq_lock);
2363                 bnx2x_panic();
2364                 return -EBUSY;
2365         }
2366
2367         /* CID needs port number to be encoded in it */
2368         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2369                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2370                                      HW_CID(bp, cid)));
2371         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2372         if (common)
2373                 bp->spq_prod_bd->hdr.type |=
2374                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2375
2376         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2377         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2378
2379         bp->spq_left--;
2380
2381         if (bp->spq_prod_bd == bp->spq_last_bd) {
2382                 bp->spq_prod_bd = bp->spq;
2383                 bp->spq_prod_idx = 0;
2384                 DP(NETIF_MSG_TIMER, "end of spq\n");
2385
2386         } else {
2387                 bp->spq_prod_bd++;
2388                 bp->spq_prod_idx++;
2389         }
2390
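        /* publish the new producer index to the FW via XSTORM internal
           memory */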
2391         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2392                bp->spq_prod_idx);
2393
2394         spin_unlock_bh(&bp->spq_lock);
2395         return 0;
2396 }
2397
2398 /* acquire split MCP access lock register */
2399 static int bnx2x_acquire_alr(struct bnx2x *bp)
2400 {
2401         u32 i, j, val;
2402         int rc = 0;
2403
2404         might_sleep();
2405         i = 100;
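        /* request the lock by setting bit 31; the bit reads back as set
           once the MCP grants the lock (poll for up to 5 seconds) */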
2406         for (j = 0; j < i*10; j++) {
2407                 val = (1UL << 31);
2408                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2410                 if (val & (1L << 31))
2411                         break;
2412
2413                 msleep(5);
2414         }
2415         if (!(val & (1L << 31))) {
2416                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2417                 rc = -EBUSY;
2418         }
2419
2420         return rc;
2421 }
2422
2423 /* release split MCP access lock register */
2424 static void bnx2x_release_alr(struct bnx2x *bp)
2425 {
2426         u32 val = 0;
2427
2428         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2429 }
2430
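/* Compare the cached default status block indices against the chip's and
 * return a bitmask of what changed: bit 0 - attention bits, bit 1 - CSTORM,
 * bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM */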
2431 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2432 {
2433         struct host_def_status_block *def_sb = bp->def_status_blk;
2434         u16 rc = 0;
2435
2436         barrier(); /* status block is written to by the chip */
2437         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2438                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2439                 rc |= 1;
2440         }
2441         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2442                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2443                 rc |= 2;
2444         }
2445         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2446                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2447                 rc |= 4;
2448         }
2449         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2450                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2451                 rc |= 8;
2452         }
2453         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2454                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2455                 rc |= 16;
2456         }
2457         return rc;
2458 }
2459
2460 /*
2461  * slow path service functions
2462  */
2463
2464 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2465 {
2466         int port = BP_PORT(bp);
2467         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2468                        COMMAND_REG_ATTN_BITS_SET);
2469         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2470                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2471         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2472                                        NIG_REG_MASK_INTERRUPT_PORT0;
2473         u32 aeu_mask;
2474
2475         if (bp->attn_state & asserted)
2476                 BNX2X_ERR("IGU ERROR\n");
2477
2478         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479         aeu_mask = REG_RD(bp, aeu_addr);
2480
2481         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2482            aeu_mask, asserted);
2483         aeu_mask &= ~(asserted & 0xff);
2484         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2485
2486         REG_WR(bp, aeu_addr, aeu_mask);
2487         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2488
2489         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2490         bp->attn_state |= asserted;
2491         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2492
2493         if (asserted & ATTN_HARD_WIRED_MASK) {
2494                 if (asserted & ATTN_NIG_FOR_FUNC) {
2495
2496                         bnx2x_acquire_phy_lock(bp);
2497
2498                         /* save nig interrupt mask */
2499                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2500                         REG_WR(bp, nig_int_mask_addr, 0);
2501
2502                         bnx2x_link_attn(bp);
2503
2504                         /* handle unicore attn? */
2505                 }
2506                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2507                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2508
2509                 if (asserted & GPIO_2_FUNC)
2510                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2511
2512                 if (asserted & GPIO_3_FUNC)
2513                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2514
2515                 if (asserted & GPIO_4_FUNC)
2516                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2517
2518                 if (port == 0) {
2519                         if (asserted & ATTN_GENERAL_ATTN_1) {
2520                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2521                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2522                         }
2523                         if (asserted & ATTN_GENERAL_ATTN_2) {
2524                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2525                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2526                         }
2527                         if (asserted & ATTN_GENERAL_ATTN_3) {
2528                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2529                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2530                         }
2531                 } else {
2532                         if (asserted & ATTN_GENERAL_ATTN_4) {
2533                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2534                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2535                         }
2536                         if (asserted & ATTN_GENERAL_ATTN_5) {
2537                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2538                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2539                         }
2540                         if (asserted & ATTN_GENERAL_ATTN_6) {
2541                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2542                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2543                         }
2544                 }
2545
2546         } /* if hardwired */
2547
2548         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549            asserted, hc_addr);
2550         REG_WR(bp, hc_addr, asserted);
2551
2552         /* now set back the mask */
2553         if (asserted & ATTN_NIG_FOR_FUNC) {
2554                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2555                 bnx2x_release_phy_lock(bp);
2556         }
2557 }
2558
2559 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2560 {
2561         int port = BP_PORT(bp);
2562         int reg_offset;
2563         u32 val;
2564
2565         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2566                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2567
2568         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2569
2570                 val = REG_RD(bp, reg_offset);
2571                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2572                 REG_WR(bp, reg_offset, val);
2573
2574                 BNX2X_ERR("SPIO5 hw attention\n");
2575
2576                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2577                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2578                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2579                         /* Fan failure attention */
2580
2581                         /* The PHY reset is controlled by GPIO 1 */
2582                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2583                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2584                         /* Low power mode is controlled by GPIO 2 */
2585                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2586                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2587                         /* mark the failure */
2588                         bp->link_params.ext_phy_config &=
2589                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2590                         bp->link_params.ext_phy_config |=
2591                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2592                         SHMEM_WR(bp,
2593                                  dev_info.port_hw_config[port].
2594                                                         external_phy_config,
2595                                  bp->link_params.ext_phy_config);
2596                         /* log the failure */
2597                         printk(KERN_ERR PFX "Fan Failure on Network"
2598                                " Controller %s has caused the driver to"
2599                                " shutdown the card to prevent permanent"
2600                                " damage.  Please contact Dell Support for"
2601                                " assistance\n", bp->dev->name);
2602                         break;
2603
2604                 default:
2605                         break;
2606                 }
2607         }
2608
2609         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2610
2611                 val = REG_RD(bp, reg_offset);
2612                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2613                 REG_WR(bp, reg_offset, val);
2614
2615                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2616                           (attn & HW_INTERRUT_ASSERT_SET_0));
2617                 bnx2x_panic();
2618         }
2619 }
2620
2621 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2622 {
2623         u32 val;
2624
2625         if (attn & BNX2X_DOORQ_ASSERT) {
2626
2627                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2628                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2629                 /* DORQ discard attention */
2630                 if (val & 0x2)
2631                         BNX2X_ERR("FATAL error from DORQ\n");
2632         }
2633
2634         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2635
2636                 int port = BP_PORT(bp);
2637                 int reg_offset;
2638
2639                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2640                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2641
2642                 val = REG_RD(bp, reg_offset);
2643                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2644                 REG_WR(bp, reg_offset, val);
2645
2646                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2647                           (attn & HW_INTERRUT_ASSERT_SET_1));
2648                 bnx2x_panic();
2649         }
2650 }
2651
2652 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2653 {
2654         u32 val;
2655
2656         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2657
2658                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2659                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2660                 /* CFC error attention */
2661                 if (val & 0x2)
2662                         BNX2X_ERR("FATAL error from CFC\n");
2663         }
2664
2665         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2666
2667                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2668                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2669                 /* RQ_USDMDP_FIFO_OVERFLOW */
2670                 if (val & 0x18000)
2671                         BNX2X_ERR("FATAL error from PXP\n");
2672         }
2673
2674         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2675
2676                 int port = BP_PORT(bp);
2677                 int reg_offset;
2678
2679                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2680                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2681
2682                 val = REG_RD(bp, reg_offset);
2683                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2684                 REG_WR(bp, reg_offset, val);
2685
2686                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2687                           (attn & HW_INTERRUT_ASSERT_SET_2));
2688                 bnx2x_panic();
2689         }
2690 }
2691
2692 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2693 {
2694         u32 val;
2695
2696         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2697
2698                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2699                         int func = BP_FUNC(bp);
2700
2701                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2702                         bnx2x__link_status_update(bp);
2703                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2704                                                         DRV_STATUS_PMF)
2705                                 bnx2x_pmf_update(bp);
2706
2707                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2708
2709                         BNX2X_ERR("MC assert!\n");
2710                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2711                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2712                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2713                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2714                         bnx2x_panic();
2715
2716                 } else if (attn & BNX2X_MCP_ASSERT) {
2717
2718                         BNX2X_ERR("MCP assert!\n");
2719                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2720                         bnx2x_fw_dump(bp);
2721
2722                 } else
2723                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2724         }
2725
2726         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2727                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2728                 if (attn & BNX2X_GRC_TIMEOUT) {
2729                         val = CHIP_IS_E1H(bp) ?
2730                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2731                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2732                 }
2733                 if (attn & BNX2X_GRC_RSV) {
2734                         val = CHIP_IS_E1H(bp) ?
2735                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2736                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2737                 }
2738                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2739         }
2740 }
2741
2742 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2743 {
2744         struct attn_route attn;
2745         struct attn_route group_mask;
2746         int port = BP_PORT(bp);
2747         int index;
2748         u32 reg_addr;
2749         u32 val;
2750         u32 aeu_mask;
2751
2752         /* need to take HW lock because MCP or other port might also
2753            try to handle this event */
2754         bnx2x_acquire_alr(bp);
2755
2756         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2757         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2758         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2759         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2760         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2761            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2762
2763         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2764                 if (deasserted & (1 << index)) {
2765                         group_mask = bp->attn_group[index];
2766
2767                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2768                            index, group_mask.sig[0], group_mask.sig[1],
2769                            group_mask.sig[2], group_mask.sig[3]);
2770
2771                         bnx2x_attn_int_deasserted3(bp,
2772                                         attn.sig[3] & group_mask.sig[3]);
2773                         bnx2x_attn_int_deasserted1(bp,
2774                                         attn.sig[1] & group_mask.sig[1]);
2775                         bnx2x_attn_int_deasserted2(bp,
2776                                         attn.sig[2] & group_mask.sig[2]);
2777                         bnx2x_attn_int_deasserted0(bp,
2778                                         attn.sig[0] & group_mask.sig[0]);
2779
2780                         if ((attn.sig[0] & group_mask.sig[0] &
2781                                                 HW_PRTY_ASSERT_SET_0) ||
2782                             (attn.sig[1] & group_mask.sig[1] &
2783                                                 HW_PRTY_ASSERT_SET_1) ||
2784                             (attn.sig[2] & group_mask.sig[2] &
2785                                                 HW_PRTY_ASSERT_SET_2))
2786                                 BNX2X_ERR("FATAL HW block parity attention\n");
2787                 }
2788         }
2789
2790         bnx2x_release_alr(bp);
2791
2792         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2793
2794         val = ~deasserted;
2795         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2796            val, reg_addr);
2797         REG_WR(bp, reg_addr, val);
2798
2799         if (~bp->attn_state & deasserted)
2800                 BNX2X_ERR("IGU ERROR\n");
2801
2802         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2803                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2804
2805         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806         aeu_mask = REG_RD(bp, reg_addr);
2807
2808         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2809            aeu_mask, deasserted);
2810         aeu_mask |= (deasserted & 0xff);
2811         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2812
2813         REG_WR(bp, reg_addr, aeu_mask);
2814         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2815
2816         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2817         bp->attn_state &= ~deasserted;
2818         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2819 }
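/* In brief, the deassertion handling above: take the ALR so the MCP or
 * the other port cannot race us, snapshot the four after-invert AEU
 * signal registers, run every newly deasserted group through the
 * per-block handlers, acknowledge the change towards the HC, and
 * finally re-enable the lines in the AEU mask (under the per-port HW
 * lock) before updating the driver's soft attn_state copy.
 */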
2820
2821 static void bnx2x_attn_int(struct bnx2x *bp)
2822 {
2823         /* read local copy of bits */
2824         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2825                                                                 attn_bits);
2826         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2827                                                                 attn_bits_ack);
2828         u32 attn_state = bp->attn_state;
2829
2830         /* look for changed bits */
2831         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2832         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2833
2834         DP(NETIF_MSG_HW,
2835            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2836            attn_bits, attn_ack, asserted, deasserted);
2837
2838         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2839                 BNX2X_ERR("BAD attention state\n");
2840
2841         /* handle bits that were raised */
2842         if (asserted)
2843                 bnx2x_attn_int_asserted(bp, asserted);
2844
2845         if (deasserted)
2846                 bnx2x_attn_int_deasserted(bp, deasserted);
2847 }
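/* Edge detection on the attention lines: a bit is newly asserted when
 * the status block reports it set but it is neither acked nor recorded
 * in the driver's attn_state copy, and newly deasserted in the mirror
 * case.  A bit with attn_bits == attn_ack but attn_bits != attn_state
 * means the soft copy fell out of sync; that is the "BAD attention
 * state" check above.
 */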
2848
2849 static void bnx2x_sp_task(struct work_struct *work)
2850 {
2851         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2852         u16 status;
2853
2854
2855         /* Return here if interrupt is disabled */
2856         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2857                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2858                 return;
2859         }
2860
2861         status = bnx2x_update_dsb_idx(bp);
2862 /*      if (status == 0)                                     */
2863 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2864
2865         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2866
2867         /* HW attentions */
2868         if (status & 0x1)
2869                 bnx2x_attn_int(bp);
2870
2871         /* CStorm events: query_stats, port delete ramrod */
2872         if (status & 0x2)
2873                 bp->stats_pending = 0;
2874
2875         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2876                      IGU_INT_NOP, 1);
2877         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2878                      IGU_INT_NOP, 1);
2879         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2880                      IGU_INT_NOP, 1);
2881         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2882                      IGU_INT_NOP, 1);
2883         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2884                      IGU_INT_ENABLE, 1);
2885
2886 }
2887
2888 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2889 {
2890         struct net_device *dev = dev_instance;
2891         struct bnx2x *bp = netdev_priv(dev);
2892
2893         /* Return here if interrupt is disabled */
2894         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2895                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2896                 return IRQ_HANDLED;
2897         }
2898
2899         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2900
2901 #ifdef BNX2X_STOP_ON_ERROR
2902         if (unlikely(bp->panic))
2903                 return IRQ_HANDLED;
2904 #endif
2905
2906         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2907
2908         return IRQ_HANDLED;
2909 }
2910
2911 /* end of slow path */
2912
2913 /* Statistics */
2914
2915 /****************************************************************************
2916 * Macros
2917 ****************************************************************************/
2918
2919 /* sum[hi:lo] += add[hi:lo] */
2920 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2921         do { \
2922                 s_lo += a_lo; \
2923                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2924         } while (0)
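/* The carry trick: the halves are u32, so if the low-word addition
 * wrapped around then (s_lo < a_lo) holds afterwards.  For example,
 * s_lo = 0xfffffffe plus a_lo = 3 yields s_lo = 1, and 1 < 3 carries
 * the lost bit into s_hi.
 */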
2925
2926 /* difference = minuend - subtrahend */
2927 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2928         do { \
2929                 if (m_lo < s_lo) { \
2930                         /* underflow */ \
2931                         d_hi = m_hi - s_hi; \
2932                         if (d_hi > 0) { \
2933                                 /* we can 'borrow' 1 */ \
2934                                 d_hi--; \
2935                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2936                         } else { \
2937                                 /* m_hi <= s_hi */ \
2938                                 d_hi = 0; \
2939                                 d_lo = 0; \
2940                         } \
2941                 } else { \
2942                         /* m_lo >= s_lo */ \
2943                         if (m_hi < s_hi) { \
2944                                 d_hi = 0; \
2945                                 d_lo = 0; \
2946                         } else { \
2947                                 /* m_hi >= s_hi */ \
2948                                 d_hi = m_hi - s_hi; \
2949                                 d_lo = m_lo - s_lo; \
2950                         } \
2951                 } \
2952         } while (0)
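/* Borrow logic: when the low minuend is smaller, one is borrowed from
 * the high word and d_lo = m_lo + (UINT_MAX - s_lo) + 1, which is
 * m_lo - s_lo modulo 2^32 without an intermediate underflow.  Per the
 * inline comments, a difference that would come out negative overall
 * is clamped to 0/0, so a counter that moved backwards (e.g. after a
 * block reset) simply contributes no increment.
 */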
2953
2954 #define UPDATE_STAT64(s, t) \
2955         do { \
2956                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2957                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2958                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2959                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2960                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2961                        pstats->mac_stx[1].t##_lo, diff.lo); \
2962         } while (0)
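/* mac_stx[0] holds the last raw MAC snapshot and mac_stx[1] the
 * running total: each update takes the 64-bit delta against the old
 * snapshot, stores the new one and folds the delta into the
 * accumulator, so the totals survive resets of the MAC counters.
 */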
2963
2964 #define UPDATE_STAT64_NIG(s, t) \
2965         do { \
2966                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2967                         diff.lo, new->s##_lo, old->s##_lo); \
2968                 ADD_64(estats->t##_hi, diff.hi, \
2969                        estats->t##_lo, diff.lo); \
2970         } while (0)
2971
2972 /* sum[hi:lo] += add */
2973 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2974         do { \
2975                 s_lo += a; \
2976                 s_hi += (s_lo < a) ? 1 : 0; \
2977         } while (0)
2978
2979 #define UPDATE_EXTEND_STAT(s) \
2980         do { \
2981                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2982                               pstats->mac_stx[1].s##_lo, \
2983                               new->s); \
2984         } while (0)
2985
2986 #define UPDATE_EXTEND_TSTAT(s, t) \
2987         do { \
2988                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2989                 old_tclient->s = le32_to_cpu(tclient->s); \
2990                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2991         } while (0)
2992
2993 #define UPDATE_EXTEND_XSTAT(s, t) \
2994         do { \
2995                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2996                 old_xclient->s = le32_to_cpu(xclient->s); \
2997                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2998         } while (0)
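/* The storms only export 32-bit counters; the *_EXTEND_* variants
 * widen them by accumulating each snapshot's delta into a 64-bit host
 * counter.  The plain u32 subtraction still gives the right delta
 * across a single wrap of the firmware counter.
 */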
2999
3000 /*
3001  * General service functions
3002  */
3003
3004 static inline long bnx2x_hilo(u32 *hiref)
3005 {
3006         u32 lo = *(hiref + 1);
3007 #if (BITS_PER_LONG == 64)
3008         u32 hi = *hiref;
3009
3010         return HILO_U64(hi, lo);
3011 #else
3012         return lo;
3013 #endif
3014 }
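/* Note that on 32-bit kernels bnx2x_hilo() deliberately drops the high
 * word: the net_device_stats fields it feeds are unsigned long and
 * could not hold more anyway.
 */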
3015
3016 /*
3017  * Init service functions
3018  */
3019
3020 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3021 {
3022         if (!bp->stats_pending) {
3023                 struct eth_query_ramrod_data ramrod_data = {0};
3024                 int rc;
3025
3026                 ramrod_data.drv_counter = bp->stats_counter++;
3027                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3028                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3029
3030                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3031                                    ((u32 *)&ramrod_data)[1],
3032                                    ((u32 *)&ramrod_data)[0], 0);
3033                 if (rc == 0) {
3034                         /* stats ramrod has its own slot on the spq */
3035                         bp->spq_left++;
3036                         bp->stats_pending = 1;
3037                 }
3038         }
3039 }
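/* A STAT_QUERY ramrod asks the storms to dump fresh per-client
 * statistics.  drv_counter stamps the request and the storms echo it
 * back, so the update path below only consumes a snapshot whose
 * counter matches and never mistakes stale data for current.  Since
 * the stats ramrod has its own reserved SPQ slot, the credit that
 * bnx2x_sp_post() took is returned right away via spq_left++.
 */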
3040
3041 static void bnx2x_stats_init(struct bnx2x *bp)
3042 {
3043         int port = BP_PORT(bp);
3044
3045         bp->executer_idx = 0;
3046         bp->stats_counter = 0;
3047
3048         /* port stats */
3049         if (!BP_NOMCP(bp))
3050                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3051         else
3052                 bp->port.port_stx = 0;
3053         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3054
3055         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3056         bp->port.old_nig_stats.brb_discard =
3057                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3058         bp->port.old_nig_stats.brb_truncate =
3059                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3060         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3061                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3062         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3063                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3064
3065         /* function stats */
3066         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3067         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3068         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3069         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3070
3071         bp->stats_state = STATS_STATE_DISABLED;
3072         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3073                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3074 }
3075
3076 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3077 {
3078         struct dmae_command *dmae = &bp->stats_dmae;
3079         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3080
3081         *stats_comp = DMAE_COMP_VAL;
3082
3083         /* loader */
3084         if (bp->executer_idx) {
3085                 int loader_idx = PMF_DMAE_C(bp);
3086
3087                 memset(dmae, 0, sizeof(struct dmae_command));
3088
3089                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3090                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3091                                 DMAE_CMD_DST_RESET |
3092 #ifdef __BIG_ENDIAN
3093                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3094 #else
3095                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3096 #endif
3097                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3098                                                DMAE_CMD_PORT_0) |
3099                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3100                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3101                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3102                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3103                                      sizeof(struct dmae_command) *
3104                                      (loader_idx + 1)) >> 2;
3105                 dmae->dst_addr_hi = 0;
3106                 dmae->len = sizeof(struct dmae_command) >> 2;
3107                 if (CHIP_IS_E1(bp))
3108                         dmae->len--;
3109                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3110                 dmae->comp_addr_hi = 0;
3111                 dmae->comp_val = 1;
3112
3113                 *stats_comp = 0;
3114                 bnx2x_post_dmae(bp, dmae, loader_idx);
3115
3116         } else if (bp->func_stx) {
3117                 *stats_comp = 0;
3118                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3119         }
3120 }
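/* Two ways out of here: if commands were queued in the slowpath dmae[]
 * array (executer_idx != 0), a loader command is built that copies the
 * first queued command into the DMAE command memory of the next
 * channel and kicks it through that channel's GO register, which
 * appears to chain the queued commands one after another; otherwise
 * the single pre-built function-stats command is posted directly.
 * Either way the final completion writes DMAE_COMP_VAL into stats_comp
 * for bnx2x_stats_comp() to poll.
 */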
3121
3122 static int bnx2x_stats_comp(struct bnx2x *bp)
3123 {
3124         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125         int cnt = 10;
3126
3127         might_sleep();
3128         while (*stats_comp != DMAE_COMP_VAL) {
3129                 if (!cnt) {
3130                         BNX2X_ERR("timeout waiting for stats to finish\n");
3131                         break;
3132                 }
3133                 cnt--;
3134                 msleep(1);
3135         }
3136         return 1;
3137 }
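/* Note that bnx2x_stats_comp() polls at most ten times with msleep(1)
 * and returns 1 even on timeout: completion is treated as best effort
 * and only the error message records that the DMAE chain never
 * finished.
 */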
3138
3139 /*
3140  * Statistics service functions
3141  */
3142
3143 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3144 {
3145         struct dmae_command *dmae;
3146         u32 opcode;
3147         int loader_idx = PMF_DMAE_C(bp);
3148         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3149
3150         /* sanity */
3151         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3152                 BNX2X_ERR("BUG!\n");
3153                 return;
3154         }
3155
3156         bp->executer_idx = 0;
3157
3158         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3159                   DMAE_CMD_C_ENABLE |
3160                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3161 #ifdef __BIG_ENDIAN
3162                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3163 #else
3164                   DMAE_CMD_ENDIANITY_DW_SWAP |
3165 #endif
3166                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3167                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3168
3169         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3171         dmae->src_addr_lo = bp->port.port_stx >> 2;
3172         dmae->src_addr_hi = 0;
3173         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3174         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3175         dmae->len = DMAE_LEN32_RD_MAX;
3176         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3177         dmae->comp_addr_hi = 0;
3178         dmae->comp_val = 1;
3179
3180         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3182         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3183         dmae->src_addr_hi = 0;
3184         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3185                                    DMAE_LEN32_RD_MAX * 4);
3186         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3187                                    DMAE_LEN32_RD_MAX * 4);
3188         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3189         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3190         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3191         dmae->comp_val = DMAE_COMP_VAL;
3192
3193         *stats_comp = 0;
3194         bnx2x_hw_stats_post(bp);
3195         bnx2x_stats_comp(bp);
3196 }
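/* The port stats block in shmem is larger than a single DMAE read may
 * be, so the transfer is split at DMAE_LEN32_RD_MAX dwords: the first
 * command chains onwards through a GO register while the second covers
 * the remainder and carries the real completion into stats_comp.
 */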
3197
3198 static void bnx2x_port_stats_init(struct bnx2x *bp)
3199 {
3200         struct dmae_command *dmae;
3201         int port = BP_PORT(bp);
3202         int vn = BP_E1HVN(bp);
3203         u32 opcode;
3204         int loader_idx = PMF_DMAE_C(bp);
3205         u32 mac_addr;
3206         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3207
3208         /* sanity */
3209         if (!bp->link_vars.link_up || !bp->port.pmf) {
3210                 BNX2X_ERR("BUG!\n");
3211                 return;
3212         }
3213
3214         bp->executer_idx = 0;
3215
3216         /* MCP */
3217         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3218                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3219                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3220 #ifdef __BIG_ENDIAN
3221                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3222 #else
3223                   DMAE_CMD_ENDIANITY_DW_SWAP |
3224 #endif
3225                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3226                   (vn << DMAE_CMD_E1HVN_SHIFT));
3227
3228         if (bp->port.port_stx) {
3229
3230                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231                 dmae->opcode = opcode;
3232                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3233                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3234                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3235                 dmae->dst_addr_hi = 0;
3236                 dmae->len = sizeof(struct host_port_stats) >> 2;
3237                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238                 dmae->comp_addr_hi = 0;
3239                 dmae->comp_val = 1;
3240         }
3241
3242         if (bp->func_stx) {
3243
3244                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3245                 dmae->opcode = opcode;
3246                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3247                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3248                 dmae->dst_addr_lo = bp->func_stx >> 2;
3249                 dmae->dst_addr_hi = 0;
3250                 dmae->len = sizeof(struct host_func_stats) >> 2;
3251                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252                 dmae->comp_addr_hi = 0;
3253                 dmae->comp_val = 1;
3254         }
3255
3256         /* MAC */
3257         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3258                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3259                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3260 #ifdef __BIG_ENDIAN
3261                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3262 #else
3263                   DMAE_CMD_ENDIANITY_DW_SWAP |
3264 #endif
3265                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3266                   (vn << DMAE_CMD_E1HVN_SHIFT));
3267
3268         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3269
3270                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3271                                    NIG_REG_INGRESS_BMAC0_MEM);
3272
3273                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3274                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3275                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276                 dmae->opcode = opcode;
3277                 dmae->src_addr_lo = (mac_addr +
3278                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3279                 dmae->src_addr_hi = 0;
3280                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3281                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3282                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3283                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3284                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285                 dmae->comp_addr_hi = 0;
3286                 dmae->comp_val = 1;
3287
3288                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3289                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3290                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291                 dmae->opcode = opcode;
3292                 dmae->src_addr_lo = (mac_addr +
3293                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3294                 dmae->src_addr_hi = 0;
3295                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3296                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3297                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3298                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3299                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3300                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3301                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302                 dmae->comp_addr_hi = 0;
3303                 dmae->comp_val = 1;
3304
3305         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3306
3307                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3308
3309                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3310                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311                 dmae->opcode = opcode;
3312                 dmae->src_addr_lo = (mac_addr +
3313                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3314                 dmae->src_addr_hi = 0;
3315                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3316                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3317                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3318                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319                 dmae->comp_addr_hi = 0;
3320                 dmae->comp_val = 1;
3321
3322                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3323                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324                 dmae->opcode = opcode;
3325                 dmae->src_addr_lo = (mac_addr +
3326                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3327                 dmae->src_addr_hi = 0;
3328                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3329                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3330                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3331                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3332                 dmae->len = 1;
3333                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334                 dmae->comp_addr_hi = 0;
3335                 dmae->comp_val = 1;
3336
3337                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3338                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339                 dmae->opcode = opcode;
3340                 dmae->src_addr_lo = (mac_addr +
3341                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3342                 dmae->src_addr_hi = 0;
3343                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3344                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3345                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3346                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3347                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3348                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349                 dmae->comp_addr_hi = 0;
3350                 dmae->comp_val = 1;
3351         }
3352
3353         /* NIG */
3354         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355         dmae->opcode = opcode;
3356         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3357                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3358         dmae->src_addr_hi = 0;
3359         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3360         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3361         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3362         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363         dmae->comp_addr_hi = 0;
3364         dmae->comp_val = 1;
3365
3366         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367         dmae->opcode = opcode;
3368         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3369                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3370         dmae->src_addr_hi = 0;
3371         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3372                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3373         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3374                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3375         dmae->len = (2*sizeof(u32)) >> 2;
3376         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377         dmae->comp_addr_hi = 0;
3378         dmae->comp_val = 1;
3379
3380         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3381         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3382                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3383                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3384 #ifdef __BIG_ENDIAN
3385                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3386 #else
3387                         DMAE_CMD_ENDIANITY_DW_SWAP |
3388 #endif
3389                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3390                         (vn << DMAE_CMD_E1HVN_SHIFT));
3391         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3392                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3393         dmae->src_addr_hi = 0;
3394         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3395                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3396         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3397                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3398         dmae->len = (2*sizeof(u32)) >> 2;
3399         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401         dmae->comp_val = DMAE_COMP_VAL;
3402
3403         *stats_comp = 0;
3404 }
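/* Summary of the chain built above: the host port/function stats are
 * DMAEed out to their shmem areas, the active MAC's counters (BMAC or
 * EMAC block, whichever the link is using) are pulled into mac_stats,
 * and the NIG counters into nig_stats.  Only the very last command
 * completes into stats_comp; everything before it chains through GO
 * registers.
 */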
3405
3406 static void bnx2x_func_stats_init(struct bnx2x *bp)
3407 {
3408         struct dmae_command *dmae = &bp->stats_dmae;
3409         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3410
3411         /* sanity */
3412         if (!bp->func_stx) {
3413                 BNX2X_ERR("BUG!\n");
3414                 return;
3415         }
3416
3417         bp->executer_idx = 0;
3418         memset(dmae, 0, sizeof(struct dmae_command));
3419
3420         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3422                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3423 #ifdef __BIG_ENDIAN
3424                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425 #else
3426                         DMAE_CMD_ENDIANITY_DW_SWAP |
3427 #endif
3428                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3429                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3430         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3431         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3432         dmae->dst_addr_lo = bp->func_stx >> 2;
3433         dmae->dst_addr_hi = 0;
3434         dmae->len = sizeof(struct host_func_stats) >> 2;
3435         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3436         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3437         dmae->comp_val = DMAE_COMP_VAL;
3438
3439         *stats_comp = 0;
3440 }
3441
3442 static void bnx2x_stats_start(struct bnx2x *bp)
3443 {
3444         if (bp->port.pmf)
3445                 bnx2x_port_stats_init(bp);
3446
3447         else if (bp->func_stx)
3448                 bnx2x_func_stats_init(bp);
3449
3450         bnx2x_hw_stats_post(bp);
3451         bnx2x_storm_stats_post(bp);
3452 }
3453
3454 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3455 {
3456         bnx2x_stats_comp(bp);
3457         bnx2x_stats_pmf_update(bp);
3458         bnx2x_stats_start(bp);
3459 }
3460
3461 static void bnx2x_stats_restart(struct bnx2x *bp)
3462 {
3463         bnx2x_stats_comp(bp);
3464         bnx2x_stats_start(bp);
3465 }
3466
3467 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3468 {
3469         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3470         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3471         struct regpair diff;
3472
3473         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3474         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3475         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3476         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3477         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3478         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3479         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3480         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3481         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3482         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3483         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3484         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3485         UPDATE_STAT64(tx_stat_gt127,
3486                                 tx_stat_etherstatspkts65octetsto127octets);
3487         UPDATE_STAT64(tx_stat_gt255,
3488                                 tx_stat_etherstatspkts128octetsto255octets);
3489         UPDATE_STAT64(tx_stat_gt511,
3490                                 tx_stat_etherstatspkts256octetsto511octets);
3491         UPDATE_STAT64(tx_stat_gt1023,
3492                                 tx_stat_etherstatspkts512octetsto1023octets);
3493         UPDATE_STAT64(tx_stat_gt1518,
3494                                 tx_stat_etherstatspkts1024octetsto1522octets);
3495         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3496         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3497         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3498         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3499         UPDATE_STAT64(tx_stat_gterr,
3500                                 tx_stat_dot3statsinternalmactransmiterrors);
3501         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3502 }
3503
3504 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3505 {
3506         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3507         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3508
3509         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3510         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3511         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3512         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3513         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3514         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3515         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3516         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3517         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3518         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3519         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3520         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3521         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3522         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3523         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3524         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3525         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3526         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3527         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3528         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3529         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3530         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3531         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3532         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3533         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3534         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3535         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3536         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3537         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3538         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3539         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3540 }
3541
3542 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3543 {
3544         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3545         struct nig_stats *old = &(bp->port.old_nig_stats);
3546         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548         struct regpair diff;
3549
3550         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3551                 bnx2x_bmac_stats_update(bp);
3552
3553         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3554                 bnx2x_emac_stats_update(bp);
3555
3556         else { /* unreached */
3557                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3558                 return -1;
3559         }
3560
3561         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3562                       new->brb_discard - old->brb_discard);
3563         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3564                       new->brb_truncate - old->brb_truncate);
3565
3566         UPDATE_STAT64_NIG(egress_mac_pkt0,
3567                                         etherstatspkts1024octetsto1522octets);
3568         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3569
3570         memcpy(old, new, sizeof(struct nig_stats));
3571
3572         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3573                sizeof(struct mac_stx));
3574         estats->brb_drop_hi = pstats->brb_drop_hi;
3575         estats->brb_drop_lo = pstats->brb_drop_lo;
3576
3577         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3578
3579         return 0;
3580 }
3581
3582 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3583 {
3584         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3585         int cl_id = BP_CL_ID(bp);
3586         struct tstorm_per_port_stats *tport =
3587                                 &stats->tstorm_common.port_statistics;
3588         struct tstorm_per_client_stats *tclient =
3589                         &stats->tstorm_common.client_statistics[cl_id];
3590         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3591         struct xstorm_per_client_stats *xclient =
3592                         &stats->xstorm_common.client_statistics[cl_id];
3593         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3594         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3595         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3596         u32 diff;
3597
3598         /* are storm stats valid? */
3599         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3600                                                         bp->stats_counter) {
3601                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3602                    "  tstorm counter (%d) != stats_counter (%d)\n",
3603                    tclient->stats_counter, bp->stats_counter);
3604                 return -1;
3605         }
3606         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3607                                                         bp->stats_counter) {
3608                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3609                    "  xstorm counter (%d) != stats_counter (%d)\n",
3610                    xclient->stats_counter, bp->stats_counter);
3611                 return -2;
3612         }
3613
3614         fstats->total_bytes_received_hi =
3615         fstats->valid_bytes_received_hi =
3616                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3617         fstats->total_bytes_received_lo =
3618         fstats->valid_bytes_received_lo =
3619                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3620
3621         estats->error_bytes_received_hi =
3622                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3623         estats->error_bytes_received_lo =
3624                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3625         ADD_64(estats->error_bytes_received_hi,
3626                estats->rx_stat_ifhcinbadoctets_hi,
3627                estats->error_bytes_received_lo,
3628                estats->rx_stat_ifhcinbadoctets_lo);
3629
3630         ADD_64(fstats->total_bytes_received_hi,
3631                estats->error_bytes_received_hi,
3632                fstats->total_bytes_received_lo,
3633                estats->error_bytes_received_lo);
3634
3635         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3636         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3637                                 total_multicast_packets_received);
3638         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3639                                 total_broadcast_packets_received);
3640
3641         fstats->total_bytes_transmitted_hi =
3642                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3643         fstats->total_bytes_transmitted_lo =
3644                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3645
3646         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3647                                 total_unicast_packets_transmitted);
3648         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3649                                 total_multicast_packets_transmitted);
3650         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3651                                 total_broadcast_packets_transmitted);
3652
3653         memcpy(estats, &(fstats->total_bytes_received_hi),
3654                sizeof(struct host_func_stats) - 2*sizeof(u32));
3655
3656         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3657         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3658         estats->brb_truncate_discard =
3659                                 le32_to_cpu(tport->brb_truncate_discard);
3660         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3661
3662         old_tclient->rcv_unicast_bytes.hi =
3663                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3664         old_tclient->rcv_unicast_bytes.lo =
3665                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3666         old_tclient->rcv_broadcast_bytes.hi =
3667                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3668         old_tclient->rcv_broadcast_bytes.lo =
3669                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3670         old_tclient->rcv_multicast_bytes.hi =
3671                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3672         old_tclient->rcv_multicast_bytes.lo =
3673                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3674         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3675
3676         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3677         old_tclient->packets_too_big_discard =
3678                                 le32_to_cpu(tclient->packets_too_big_discard);
3679         estats->no_buff_discard =
3680         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3681         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3682
3683         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3684         old_xclient->unicast_bytes_sent.hi =
3685                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3686         old_xclient->unicast_bytes_sent.lo =
3687                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3688         old_xclient->multicast_bytes_sent.hi =
3689                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3690         old_xclient->multicast_bytes_sent.lo =
3691                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3692         old_xclient->broadcast_bytes_sent.hi =
3693                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3694         old_xclient->broadcast_bytes_sent.lo =
3695                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3696
3697         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3698
3699         return 0;
3700 }
3701
3702 static void bnx2x_net_stats_update(struct bnx2x *bp)
3703 {
3704         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3705         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3706         struct net_device_stats *nstats = &bp->dev->stats;
3707
3708         nstats->rx_packets =
3709                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3710                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3711                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3712
3713         nstats->tx_packets =
3714                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3715                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3716                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3717
3718         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3719
3720         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3721
3722         nstats->rx_dropped = old_tclient->checksum_discard +
3723                              estats->mac_discard;
3724         nstats->tx_dropped = 0;
3725
3726         nstats->multicast =
3727                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3728
3729         nstats->collisions =
3730                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3731                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3732                         estats->tx_stat_dot3statslatecollisions_lo +
3733                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3734
3735         estats->jabber_packets_received =
3736                                 old_tclient->packets_too_big_discard +
3737                                 estats->rx_stat_dot3statsframestoolong_lo;
3738
3739         nstats->rx_length_errors =
3740                                 estats->rx_stat_etherstatsundersizepkts_lo +
3741                                 estats->jabber_packets_received;
3742         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3743         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3744         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3745         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3746         nstats->rx_missed_errors = estats->xxoverflow_discard;
3747
3748         nstats->rx_errors = nstats->rx_length_errors +
3749                             nstats->rx_over_errors +
3750                             nstats->rx_crc_errors +
3751                             nstats->rx_frame_errors +
3752                             nstats->rx_fifo_errors +
3753                             nstats->rx_missed_errors;
3754
3755         nstats->tx_aborted_errors =
3756                         estats->tx_stat_dot3statslatecollisions_lo +
3757                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3758         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3759         nstats->tx_fifo_errors = 0;
3760         nstats->tx_heartbeat_errors = 0;
3761         nstats->tx_window_errors = 0;
3762
3763         nstats->tx_errors = nstats->tx_aborted_errors +
3764                             nstats->tx_carrier_errors;
3765 }
3766
3767 static void bnx2x_stats_update(struct bnx2x *bp)
3768 {
3769         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3770         int update = 0;
3771
3772         if (*stats_comp != DMAE_COMP_VAL)
3773                 return;
3774
3775         if (bp->port.pmf)
3776                 update = (bnx2x_hw_stats_update(bp) == 0);
3777
3778         update |= (bnx2x_storm_stats_update(bp) == 0);
3779
3780         if (update)
3781                 bnx2x_net_stats_update(bp);
3782
3783         else {
3784                 if (bp->stats_pending) {
3785                         bp->stats_pending++;
3786                         if (bp->stats_pending == 3) {
3787                                 BNX2X_ERR("storm stats not updated for 3 consecutive ticks\n");
3788                                 bnx2x_panic();
3789                                 return;
3790                         }
3791                 }
3792         }
3793
3794         if (bp->msglevel & NETIF_MSG_TIMER) {
3795                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3796                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3797                 struct net_device_stats *nstats = &bp->dev->stats;
3798                 int i;
3799
3800                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3801                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3802                                   "  tx pkt (%lx)\n",
3803                        bnx2x_tx_avail(bp->fp),
3804                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3805                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3806                                   "  rx pkt (%lx)\n",
3807                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3808                              bp->fp->rx_comp_cons),
3809                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3810                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3811                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3812                        estats->driver_xoff, estats->brb_drop_lo);
3813                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3814                         "packets_too_big_discard %u  no_buff_discard %u  "
3815                         "mac_discard %u  mac_filter_discard %u  "
3816                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3817                         "ttl0_discard %u\n",
3818                        old_tclient->checksum_discard,
3819                        old_tclient->packets_too_big_discard,
3820                        old_tclient->no_buff_discard, estats->mac_discard,
3821                        estats->mac_filter_discard, estats->xxoverflow_discard,
3822                        estats->brb_truncate_discard,
3823                        old_tclient->ttl0_discard);
3824
3825                 for_each_queue(bp, i) {
3826                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3827                                bnx2x_fp(bp, i, tx_pkt),
3828                                bnx2x_fp(bp, i, rx_pkt),
3829                                bnx2x_fp(bp, i, rx_calls));
3830                 }
3831         }
3832
3833         bnx2x_hw_stats_post(bp);
3834         bnx2x_storm_stats_post(bp);
3835 }
3836
3837 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3838 {
3839         struct dmae_command *dmae;
3840         u32 opcode;
3841         int loader_idx = PMF_DMAE_C(bp);
3842         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3843
3844         bp->executer_idx = 0;
3845
3846         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3847                   DMAE_CMD_C_ENABLE |
3848                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3849 #ifdef __BIG_ENDIAN
3850                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3851 #else
3852                   DMAE_CMD_ENDIANITY_DW_SWAP |
3853 #endif
3854                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3855                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3856
3857         if (bp->port.port_stx) {
3858
3859                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860                 if (bp->func_stx)
3861                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3862                 else
3863                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3864                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3865                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3866                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3867                 dmae->dst_addr_hi = 0;
3868                 dmae->len = sizeof(struct host_port_stats) >> 2;
3869                 if (bp->func_stx) {
3870                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3871                         dmae->comp_addr_hi = 0;
3872                         dmae->comp_val = 1;
3873                 } else {
3874                         dmae->comp_addr_lo =
3875                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3876                         dmae->comp_addr_hi =
3877                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3878                         dmae->comp_val = DMAE_COMP_VAL;
3879
3880                         *stats_comp = 0;
3881                 }
3882         }
3883
3884         if (bp->func_stx) {
3885
3886                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3887                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3888                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3889                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3890                 dmae->dst_addr_lo = bp->func_stx >> 2;
3891                 dmae->dst_addr_hi = 0;
3892                 dmae->len = sizeof(struct host_func_stats) >> 2;
3893                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3895                 dmae->comp_val = DMAE_COMP_VAL;
3896
3897                 *stats_comp = 0;
3898         }
3899 }
3900
3901 static void bnx2x_stats_stop(struct bnx2x *bp)
3902 {
3903         int update = 0;
3904
3905         bnx2x_stats_comp(bp);
3906
3907         if (bp->port.pmf)
3908                 update = (bnx2x_hw_stats_update(bp) == 0);
3909
3910         update |= (bnx2x_storm_stats_update(bp) == 0);
3911
3912         if (update) {
3913                 bnx2x_net_stats_update(bp);
3914
3915                 if (bp->port.pmf)
3916                         bnx2x_port_stats_stop(bp);
3917
3918                 bnx2x_hw_stats_post(bp);
3919                 bnx2x_stats_comp(bp);
3920         }
3921 }
3922
3923 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3924 {
3925 }
3926
3927 static const struct {
3928         void (*action)(struct bnx2x *bp);
3929         enum bnx2x_stats_state next_state;
3930 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3931 /* state        event   */
3932 {
3933 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3934 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3935 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3936 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3937 },
3938 {
3939 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3940 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3941 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3942 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3943 }
3944 };
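/* A small two-state machine drives all of the above: statistics are
 * either DISABLED or ENABLED, and each PMF/LINK_UP/UPDATE/STOP event
 * picks both an action and the next state from this table.  For
 * example, STATS_EVENT_STOP while ENABLED runs bnx2x_stats_stop() and
 * lands in DISABLED, while the same event in DISABLED is a no-op.
 */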
3945
3946 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3947 {
3948         enum bnx2x_stats_state state = bp->stats_state;
3949
3950         bnx2x_stats_stm[state][event].action(bp);
3951         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3952
3953         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3954                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3955                    state, event, bp->stats_state);
3956 }
3957
3958 static void bnx2x_timer(unsigned long data)
3959 {
3960         struct bnx2x *bp = (struct bnx2x *) data;
3961
3962         if (!netif_running(bp->dev))
3963                 return;
3964
3965         if (atomic_read(&bp->intr_sem) != 0)
3966                 goto timer_restart;
3967
3968         if (poll) {
3969                 struct bnx2x_fastpath *fp = &bp->fp[0];
3970                 int rc;
3971
3972                 bnx2x_tx_int(fp, 1000);
3973                 rc = bnx2x_rx_int(fp, 1000);
3974         }
3975
3976         if (!BP_NOMCP(bp)) {
3977                 int func = BP_FUNC(bp);
3978                 u32 drv_pulse;
3979                 u32 mcp_pulse;
3980
3981                 ++bp->fw_drv_pulse_wr_seq;
3982                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3983                 /* TBD - add SYSTEM_TIME */
3984                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3985                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3986
3987                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3988                              MCP_PULSE_SEQ_MASK);
3989                 /* The delta between driver pulse and mcp response
3990                  * should be 1 (before mcp response) or 0 (after mcp response)
3991                  */
3992                 if ((drv_pulse != mcp_pulse) &&
3993                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3994                         /* someone lost a heartbeat... */
3995                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3996                                   drv_pulse, mcp_pulse);
3997                 }
3998         }
3999
4000         if ((bp->state == BNX2X_STATE_OPEN) ||
4001             (bp->state == BNX2X_STATE_DISABLED))
4002                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4003
4004 timer_restart:
4005         mod_timer(&bp->timer, jiffies + bp->current_interval);
4006 }
4007
4008 /* end of Statistics */
4009
4010 /* nic init */
4011
4012 /*
4013  * nic init service functions
4014  */
4015
4016 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4017 {
4018         int port = BP_PORT(bp);
4019
4020         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4021                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4022                         sizeof(struct ustorm_status_block)/4);
4023         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4024                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4025                         sizeof(struct cstorm_status_block)/4);
4026 }
4027
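/*
 * Program a fastpath host status block: write the DMA addresses of its
 * USTORM and CSTORM sections into the storms' internal memory, bind the
 * block to this function, and leave host coalescing disabled on every
 * index (bnx2x_update_coalesce() enables it later); finally ACK the
 * block to enable its IGU interrupt.
 */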
4028 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4029                           dma_addr_t mapping, int sb_id)
4030 {
4031         int port = BP_PORT(bp);
4032         int func = BP_FUNC(bp);
4033         int index;
4034         u64 section;
4035
4036         /* USTORM */
4037         section = ((u64)mapping) + offsetof(struct host_status_block,
4038                                             u_status_block);
4039         sb->u_status_block.status_block_id = sb_id;
4040
4041         REG_WR(bp, BAR_USTRORM_INTMEM +
4042                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4043         REG_WR(bp, BAR_USTRORM_INTMEM +
4044                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4045                U64_HI(section));
4046         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4047                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4048
4049         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4050                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4051                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4052
4053         /* CSTORM */
4054         section = ((u64)mapping) + offsetof(struct host_status_block,
4055                                             c_status_block);
4056         sb->c_status_block.status_block_id = sb_id;
4057
4058         REG_WR(bp, BAR_CSTRORM_INTMEM +
4059                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4060         REG_WR(bp, BAR_CSTRORM_INTMEM +
4061                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4062                U64_HI(section));
4063         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4064                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4065
4066         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4067                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4068                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4069
4070         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4071 }
4072
4073 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4074 {
4075         int func = BP_FUNC(bp);
4076
4077         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4078                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4079                         sizeof(struct ustorm_def_status_block)/4);
4080         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4081                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4082                         sizeof(struct cstorm_def_status_block)/4);
4083         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4084                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4085                         sizeof(struct xstorm_def_status_block)/4);
4086         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4087                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4088                         sizeof(struct tstorm_def_status_block)/4);
4089 }
4090
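/*
 * Program the default status block: latch the per-group attention signal
 * masks from the AEU, point the HC attention message at the ATTN section,
 * and register the U/C/T/X-STORM sections the same way as for a fastpath
 * status block.
 */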
4091 static void bnx2x_init_def_sb(struct bnx2x *bp,
4092                               struct host_def_status_block *def_sb,
4093                               dma_addr_t mapping, int sb_id)
4094 {
4095         int port = BP_PORT(bp);
4096         int func = BP_FUNC(bp);
4097         int index, val, reg_offset;
4098         u64 section;
4099
4100         /* ATTN */
4101         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4102                                             atten_status_block);
4103         def_sb->atten_status_block.status_block_id = sb_id;
4104
4105         bp->attn_state = 0;
4106
4107         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4108                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4109
4110         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4111                 bp->attn_group[index].sig[0] = REG_RD(bp,
4112                                                      reg_offset + 0x10*index);
4113                 bp->attn_group[index].sig[1] = REG_RD(bp,
4114                                                reg_offset + 0x4 + 0x10*index);
4115                 bp->attn_group[index].sig[2] = REG_RD(bp,
4116                                                reg_offset + 0x8 + 0x10*index);
4117                 bp->attn_group[index].sig[3] = REG_RD(bp,
4118                                                reg_offset + 0xc + 0x10*index);
4119         }
4120
4121         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4122                              HC_REG_ATTN_MSG0_ADDR_L);
4123
4124         REG_WR(bp, reg_offset, U64_LO(section));
4125         REG_WR(bp, reg_offset + 4, U64_HI(section));
4126
4127         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4128
4129         val = REG_RD(bp, reg_offset);
4130         val |= sb_id;
4131         REG_WR(bp, reg_offset, val);
4132
4133         /* USTORM */
4134         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135                                             u_def_status_block);
4136         def_sb->u_def_status_block.status_block_id = sb_id;
4137
4138         REG_WR(bp, BAR_USTRORM_INTMEM +
4139                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140         REG_WR(bp, BAR_USTRORM_INTMEM +
4141                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142                U64_HI(section));
4143         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4144                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4145
4146         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4147                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4148                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4149
4150         /* CSTORM */
4151         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152                                             c_def_status_block);
4153         def_sb->c_def_status_block.status_block_id = sb_id;
4154
4155         REG_WR(bp, BAR_CSTRORM_INTMEM +
4156                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157         REG_WR(bp, BAR_CSTRORM_INTMEM +
4158                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159                U64_HI(section));
4160         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4161                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4162
4163         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4164                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4165                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4166
4167         /* TSTORM */
4168         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4169                                             t_def_status_block);
4170         def_sb->t_def_status_block.status_block_id = sb_id;
4171
4172         REG_WR(bp, BAR_TSTRORM_INTMEM +
4173                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4174         REG_WR(bp, BAR_TSTRORM_INTMEM +
4175                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4176                U64_HI(section));
4177         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4178                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4179
4180         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4181                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4182                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4183
4184         /* XSTORM */
4185         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4186                                             x_def_status_block);
4187         def_sb->x_def_status_block.status_block_id = sb_id;
4188
4189         REG_WR(bp, BAR_XSTRORM_INTMEM +
4190                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4191         REG_WR(bp, BAR_XSTRORM_INTMEM +
4192                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4193                U64_HI(section));
4194         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4195                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4196
4197         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4198                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4199                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4200
4201         bp->stats_pending = 0;
4202         bp->set_mac_pending = 0;
4203
4204         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4205 }
4206
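/*
 * Push the current interrupt coalescing parameters to the HC.  rx_ticks
 * and tx_ticks are in usec; the division by 12 converts them to the HC
 * timeout units (apparently 12 usec per unit).  A value of 0 disables
 * coalescing on that status block index via the HC_DISABLE flag.
 */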
4207 static void bnx2x_update_coalesce(struct bnx2x *bp)
4208 {
4209         int port = BP_PORT(bp);
4210         int i;
4211
4212         for_each_queue(bp, i) {
4213                 int sb_id = bp->fp[i].sb_id;
4214
4215                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4216                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4217                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4218                                                     U_SB_ETH_RX_CQ_INDEX),
4219                         bp->rx_ticks/12);
4220                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4221                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4222                                                      U_SB_ETH_RX_CQ_INDEX),
4223                          bp->rx_ticks ? 0 : 1);
4224                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4225                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226                                                      U_SB_ETH_RX_BD_INDEX),
4227                          bp->rx_ticks ? 0 : 1);
4228
4229                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4230                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4231                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4232                                                     C_SB_ETH_TX_CQ_INDEX),
4233                         bp->tx_ticks/12);
4234                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4236                                                      C_SB_ETH_TX_CQ_INDEX),
4237                          bp->tx_ticks ? 0 : 1);
4238         }
4239 }
4240
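/*
 * Free the skbs of TPA bins [0, last); a bin still in BNX2X_TPA_START
 * owns a DMA mapping for its skb data, so unmap it before freeing.
 */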
4241 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4242                                        struct bnx2x_fastpath *fp, int last)
4243 {
4244         int i;
4245
4246         for (i = 0; i < last; i++) {
4247                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4248                 struct sk_buff *skb = rx_buf->skb;
4249
4250                 if (skb == NULL) {
4251                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4252                         continue;
4253                 }
4254
4255                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4256                         pci_unmap_single(bp->pdev,
4257                                          pci_unmap_addr(rx_buf, mapping),
4258                                          bp->rx_buf_size,
4259                                          PCI_DMA_FROMDEVICE);
4260
4261                 dev_kfree_skb(skb);
4262                 rx_buf->skb = NULL;
4263         }
4264 }
4265
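/*
 * Set up the Rx side of every queue: size the Rx buffers (MTU + Rx
 * offset + Ethernet overhead + payload alignment), pre-allocate the TPA
 * skb pool, chain the SGE/BD/CQ ring pages via their "next page"
 * elements and fill the rings with buffers.  TPA is disabled per queue
 * on any allocation failure.
 */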
4266 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4267 {
4268         int func = BP_FUNC(bp);
4269         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4270                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4271         u16 ring_prod, cqe_ring_prod;
4272         int i, j;
4273
4274         bp->rx_buf_size = bp->dev->mtu;
4275         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4276                 BCM_RX_ETH_PAYLOAD_ALIGN;
4277
4278         if (bp->flags & TPA_ENABLE_FLAG) {
4279                 DP(NETIF_MSG_IFUP,
4280                    "rx_buf_size %d  effective_mtu %d\n",
4281                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4282
4283                 for_each_queue(bp, j) {
4284                         struct bnx2x_fastpath *fp = &bp->fp[j];
4285
4286                         for (i = 0; i < max_agg_queues; i++) {
4287                                 fp->tpa_pool[i].skb =
4288                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4289                                 if (!fp->tpa_pool[i].skb) {
4290                                         BNX2X_ERR("Failed to allocate TPA "
4291                                                   "skb pool for queue[%d] - "
4292                                                   "disabling TPA on this "
4293                                                   "queue!\n", j);
4294                                         bnx2x_free_tpa_pool(bp, fp, i);
4295                                         fp->disable_tpa = 1;
4296                                         break;
4297                                 }
4298                                 pci_unmap_addr_set((struct sw_rx_bd *)
4299                                                         &fp->tpa_pool[i],
4300                                                    mapping, 0);
4301                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4302                         }
4303                 }
4304         }
4305
4306         for_each_queue(bp, j) {
4307                 struct bnx2x_fastpath *fp = &bp->fp[j];
4308
4309                 fp->rx_bd_cons = 0;
4310                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4311                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4312
4313                 /* "next page" elements initialization */
4314                 /* SGE ring */
4315                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4316                         struct eth_rx_sge *sge;
4317
4318                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4319                         sge->addr_hi =
4320                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4321                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322                         sge->addr_lo =
4323                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4324                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4325                 }
4326
4327                 bnx2x_init_sge_ring_bit_mask(fp);
4328
4329                 /* RX BD ring */
4330                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4331                         struct eth_rx_bd *rx_bd;
4332
4333                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4334                         rx_bd->addr_hi =
4335                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4336                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4337                         rx_bd->addr_lo =
4338                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4339                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4340                 }
4341
4342                 /* CQ ring */
4343                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4344                         struct eth_rx_cqe_next_page *nextpg;
4345
4346                         nextpg = (struct eth_rx_cqe_next_page *)
4347                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4348                         nextpg->addr_hi =
4349                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4350                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351                         nextpg->addr_lo =
4352                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4353                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4354                 }
4355
4356                 /* Allocate SGEs and initialize the ring elements */
4357                 for (i = 0, ring_prod = 0;
4358                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4359
4360                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4361                                 BNX2X_ERR("was only able to allocate "
4362                                           "%d rx sges\n", i);
4363                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4364                                 /* Cleanup already allocated elements */
4365                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4366                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4367                                 fp->disable_tpa = 1;
4368                                 ring_prod = 0;
4369                                 break;
4370                         }
4371                         ring_prod = NEXT_SGE_IDX(ring_prod);
4372                 }
4373                 fp->rx_sge_prod = ring_prod;
4374
4375                 /* Allocate BDs and initialize BD ring */
4376                 fp->rx_comp_cons = 0;
4377                 cqe_ring_prod = ring_prod = 0;
4378                 for (i = 0; i < bp->rx_ring_size; i++) {
4379                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4380                                 BNX2X_ERR("was only able to allocate "
4381                                           "%d rx skbs\n", i);
4382                                 bp->eth_stats.rx_skb_alloc_failed++;
4383                                 break;
4384                         }
4385                         ring_prod = NEXT_RX_IDX(ring_prod);
4386                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4387                         WARN_ON(ring_prod <= i);
4388                 }
4389
4390                 fp->rx_bd_prod = ring_prod;
4391                 /* must not have more available CQEs than BDs */
4392                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4393                                        cqe_ring_prod);
4394                 fp->rx_pkt = fp->rx_calls = 0;
4395
4396                 /* Warning!
4397                  * This will generate an interrupt (to the TSTORM);
4398                  * it must only be done after the chip is initialized.
4399                  */
4400                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4401                                      fp->rx_sge_prod);
4402                 if (j != 0)
4403                         continue;
4404
4405                 REG_WR(bp, BAR_USTRORM_INTMEM +
4406                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4407                        U64_LO(fp->rx_comp_mapping));
4408                 REG_WR(bp, BAR_USTRORM_INTMEM +
4409                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4410                        U64_HI(fp->rx_comp_mapping));
4411         }
4412 }
4413
4414 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4415 {
4416         int i, j;
4417
4418         for_each_queue(bp, j) {
4419                 struct bnx2x_fastpath *fp = &bp->fp[j];
4420
4421                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4422                         struct eth_tx_bd *tx_bd =
4423                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4424
4425                         tx_bd->addr_hi =
4426                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4427                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4428                         tx_bd->addr_lo =
4429                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4430                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4431                 }
4432
4433                 fp->tx_pkt_prod = 0;
4434                 fp->tx_pkt_cons = 0;
4435                 fp->tx_bd_prod = 0;
4436                 fp->tx_bd_cons = 0;
4437                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4438                 fp->tx_pkt = 0;
4439         }
4440 }
4441
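/*
 * Set up the slow-path (SPQ) ring and tell the XSTORM where it lives
 * and where its producer currently points.
 */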
4442 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4443 {
4444         int func = BP_FUNC(bp);
4445
4446         spin_lock_init(&bp->spq_lock);
4447
4448         bp->spq_left = MAX_SPQ_PENDING;
4449         bp->spq_prod_idx = 0;
4450         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4451         bp->spq_prod_bd = bp->spq;
4452         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4453
4454         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4455                U64_LO(bp->spq_mapping));
4456         REG_WR(bp,
4457                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4458                U64_HI(bp->spq_mapping));
4459
4460         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4461                bp->spq_prod_idx);
4462 }
4463
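/*
 * Fill the per-connection ETH context for every queue: Tx ring and
 * doorbell addresses for the XSTORM, Rx BD/SGE ring addresses and buffer
 * sizes for the USTORM (SGEs only when TPA is active), the Tx completion
 * index for the CSTORM, and the CDU validation words.
 */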
4464 static void bnx2x_init_context(struct bnx2x *bp)
4465 {
4466         int i;
4467
4468         for_each_queue(bp, i) {
4469                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4470                 struct bnx2x_fastpath *fp = &bp->fp[i];
4471                 u8 sb_id = FP_SB_ID(fp);
4472
4473                 context->xstorm_st_context.tx_bd_page_base_hi =
4474                                                 U64_HI(fp->tx_desc_mapping);
4475                 context->xstorm_st_context.tx_bd_page_base_lo =
4476                                                 U64_LO(fp->tx_desc_mapping);
4477                 context->xstorm_st_context.db_data_addr_hi =
4478                                                 U64_HI(fp->tx_prods_mapping);
4479                 context->xstorm_st_context.db_data_addr_lo =
4480                                                 U64_LO(fp->tx_prods_mapping);
4481                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4482                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4483
4484                 context->ustorm_st_context.common.sb_index_numbers =
4485                                                 BNX2X_RX_SB_INDEX_NUM;
4486                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4487                 context->ustorm_st_context.common.status_block_id = sb_id;
4488                 context->ustorm_st_context.common.flags =
4489                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4490                 context->ustorm_st_context.common.mc_alignment_size =
4491                         BCM_RX_ETH_PAYLOAD_ALIGN;
4492                 context->ustorm_st_context.common.bd_buff_size =
4493                                                 bp->rx_buf_size;
4494                 context->ustorm_st_context.common.bd_page_base_hi =
4495                                                 U64_HI(fp->rx_desc_mapping);
4496                 context->ustorm_st_context.common.bd_page_base_lo =
4497                                                 U64_LO(fp->rx_desc_mapping);
4498                 if (!fp->disable_tpa) {
4499                         context->ustorm_st_context.common.flags |=
4500                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4501                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4502                         context->ustorm_st_context.common.sge_buff_size =
4503                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4504                         context->ustorm_st_context.common.sge_page_base_hi =
4505                                                 U64_HI(fp->rx_sge_mapping);
4506                         context->ustorm_st_context.common.sge_page_base_lo =
4507                                                 U64_LO(fp->rx_sge_mapping);
4508                 }
4509
4510                 context->cstorm_st_context.sb_index_number =
4511                                                 C_SB_ETH_TX_CQ_INDEX;
4512                 context->cstorm_st_context.status_block_id = sb_id;
4513
4514                 context->xstorm_ag_context.cdu_reserved =
4515                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516                                                CDU_REGION_NUMBER_XCM_AG,
4517                                                ETH_CONNECTION_TYPE);
4518                 context->ustorm_ag_context.cdu_usage =
4519                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4520                                                CDU_REGION_NUMBER_UCM_AG,
4521                                                ETH_CONNECTION_TYPE);
4522         }
4523 }
4524
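/*
 * RSS indirection table (multi-queue mode only): spread the hash buckets
 * round-robin over the client IDs of the active queues.
 */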
4525 static void bnx2x_init_ind_table(struct bnx2x *bp)
4526 {
4527         int func = BP_FUNC(bp);
4528         int i;
4529
4530         if (!is_multi(bp))
4531                 return;
4532
4533         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4534         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4535                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4536                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4537                         BP_CL_ID(bp) + (i % bp->num_queues));
4538 }
4539
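/*
 * Per-client TSTORM configuration: MTU, statistics counter ID, optional
 * VLAN removal, and - when TPA is enabled - the per-packet SGE budget,
 * i.e. the MTU rounded up to whole SGE pages.
 */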
4540 static void bnx2x_set_client_config(struct bnx2x *bp)
4541 {
4542         struct tstorm_eth_client_config tstorm_client = {0};
4543         int port = BP_PORT(bp);
4544         int i;
4545
4546         tstorm_client.mtu = bp->dev->mtu;
4547         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4548         tstorm_client.config_flags =
4549                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4550 #ifdef BCM_VLAN
4551         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4552                 tstorm_client.config_flags |=
4553                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4554                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4555         }
4556 #endif
4557
4558         if (bp->flags & TPA_ENABLE_FLAG) {
4559                 tstorm_client.max_sges_for_packet =
4560                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4561                 tstorm_client.max_sges_for_packet =
4562                         ((tstorm_client.max_sges_for_packet +
4563                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4564                         PAGES_PER_SGE_SHIFT;
4565
4566                 tstorm_client.config_flags |=
4567                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4568         }
4569
4570         for_each_queue(bp, i) {
4571                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4572                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4573                        ((u32 *)&tstorm_client)[0]);
4574                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4575                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4576                        ((u32 *)&tstorm_client)[1]);
4577         }
4578
4579         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4580            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4581 }
4582
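/*
 * Translate bp->rx_mode into the TSTORM MAC filter masks for this
 * function: NONE drops all traffic, NORMAL accepts all broadcasts,
 * ALLMULTI additionally accepts all multicasts, and PROMISC accepts
 * everything.
 */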
4583 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4584 {
4585         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4586         int mode = bp->rx_mode;
4587         int mask = (1 << BP_L_ID(bp));
4588         int func = BP_FUNC(bp);
4589         int i;
4590
4591         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4592
4593         switch (mode) {
4594         case BNX2X_RX_MODE_NONE: /* no Rx */
4595                 tstorm_mac_filter.ucast_drop_all = mask;
4596                 tstorm_mac_filter.mcast_drop_all = mask;
4597                 tstorm_mac_filter.bcast_drop_all = mask;
4598                 break;
4599         case BNX2X_RX_MODE_NORMAL:
4600                 tstorm_mac_filter.bcast_accept_all = mask;
4601                 break;
4602         case BNX2X_RX_MODE_ALLMULTI:
4603                 tstorm_mac_filter.mcast_accept_all = mask;
4604                 tstorm_mac_filter.bcast_accept_all = mask;
4605                 break;
4606         case BNX2X_RX_MODE_PROMISC:
4607                 tstorm_mac_filter.ucast_accept_all = mask;
4608                 tstorm_mac_filter.mcast_accept_all = mask;
4609                 tstorm_mac_filter.bcast_accept_all = mask;
4610                 break;
4611         default:
4612                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4613                 break;
4614         }
4615
4616         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4617                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4618                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4619                        ((u32 *)&tstorm_mac_filter)[i]);
4620
4621 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4622                    ((u32 *)&tstorm_mac_filter)[i]); */
4623         }
4624
4625         if (mode != BNX2X_RX_MODE_NONE)
4626                 bnx2x_set_client_config(bp);
4627 }
4628
4629 static void bnx2x_init_internal_common(struct bnx2x *bp)
4630 {
4631         int i;
4632
4633         if (bp->flags & TPA_ENABLE_FLAG) {
4634                 struct tstorm_eth_tpa_exist tpa = {0};
4635
4636                 tpa.tpa_exist = 1;
4637
4638                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4639                        ((u32 *)&tpa)[0]);
4640                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4641                        ((u32 *)&tpa)[1]);
4642         }
4643
4644         /* Zero this manually as its initialization is
4645            currently missing in the initTool */
4646         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4647                 REG_WR(bp, BAR_USTRORM_INTMEM +
4648                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4649 }
4650
4651 static void bnx2x_init_internal_port(struct bnx2x *bp)
4652 {
4653         int port = BP_PORT(bp);
4654
4655         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4656         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4657         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4658         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4659 }
4660
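/*
 * Per-function internal RAM init: RSS configuration, initial (closed)
 * Rx mode, zeroed per-client statistics, the addresses the storms use
 * for statistics queries, E1H multi-function mode, and the per-client
 * CQE page base plus maximum TPA aggregation size.
 */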
4661 static void bnx2x_init_internal_func(struct bnx2x *bp)
4662 {
4663         struct tstorm_eth_function_common_config tstorm_config = {0};
4664         struct stats_indication_flags stats_flags = {0};
4665         int port = BP_PORT(bp);
4666         int func = BP_FUNC(bp);
4667         int i;
4668         u16 max_agg_size;
4669
4670         if (is_multi(bp)) {
4671                 tstorm_config.config_flags = MULTI_FLAGS;
4672                 tstorm_config.rss_result_mask = MULTI_MASK;
4673         }
4674
4675         tstorm_config.leading_client_id = BP_L_ID(bp);
4676
4677         REG_WR(bp, BAR_TSTRORM_INTMEM +
4678                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4679                (*(u32 *)&tstorm_config));
4680
4681         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4682         bnx2x_set_storm_rx_mode(bp);
4683
4684         /* reset xstorm per client statistics */
4685         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4686                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4687                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4688                        i*4, 0);
4689         }
4690         /* reset tstorm per client statistics */
4691         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4692                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4694                        i*4, 0);
4695         }
4696
4697         /* Init statistics related context */
4698         stats_flags.collect_eth = 1;
4699
4700         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4701                ((u32 *)&stats_flags)[0]);
4702         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4703                ((u32 *)&stats_flags)[1]);
4704
4705         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4706                ((u32 *)&stats_flags)[0]);
4707         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4708                ((u32 *)&stats_flags)[1]);
4709
4710         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4711                ((u32 *)&stats_flags)[0]);
4712         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4713                ((u32 *)&stats_flags)[1]);
4714
4715         REG_WR(bp, BAR_XSTRORM_INTMEM +
4716                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4717                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4718         REG_WR(bp, BAR_XSTRORM_INTMEM +
4719                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4720                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4721
4722         REG_WR(bp, BAR_TSTRORM_INTMEM +
4723                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4724                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4725         REG_WR(bp, BAR_TSTRORM_INTMEM +
4726                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4727                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4728
4729         if (CHIP_IS_E1H(bp)) {
4730                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4731                         IS_E1HMF(bp));
4732                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4733                         IS_E1HMF(bp));
4734                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4735                         IS_E1HMF(bp));
4736                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4737                         IS_E1HMF(bp));
4738
4739                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4740                          bp->e1hov);
4741         }
4742
4743         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4744         max_agg_size =
4745                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4746                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4747                     (u32)0xffff);
4748         for_each_queue(bp, i) {
4749                 struct bnx2x_fastpath *fp = &bp->fp[i];
4750
4751                 REG_WR(bp, BAR_USTRORM_INTMEM +
4752                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4753                        U64_LO(fp->rx_comp_mapping));
4754                 REG_WR(bp, BAR_USTRORM_INTMEM +
4755                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4756                        U64_HI(fp->rx_comp_mapping));
4757
4758                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4759                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4760                          max_agg_size);
4761         }
4762 }
4763
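/*
 * The switch below falls through on purpose: a COMMON load also performs
 * the PORT and FUNCTION stages, and a PORT load also performs the
 * FUNCTION stage.
 */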
4764 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4765 {
4766         switch (load_code) {
4767         case FW_MSG_CODE_DRV_LOAD_COMMON:
4768                 bnx2x_init_internal_common(bp);
4769                 /* no break */
4770
4771         case FW_MSG_CODE_DRV_LOAD_PORT:
4772                 bnx2x_init_internal_port(bp);
4773                 /* no break */
4774
4775         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4776                 bnx2x_init_internal_func(bp);
4777                 break;
4778
4779         default:
4780                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4781                 break;
4782         }
4783 }
4784
4785 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4786 {
4787         int i;
4788
4789         for_each_queue(bp, i) {
4790                 struct bnx2x_fastpath *fp = &bp->fp[i];
4791
4792                 fp->bp = bp;
4793                 fp->state = BNX2X_FP_STATE_CLOSED;
4794                 fp->index = i;
4795                 fp->cl_id = BP_L_ID(bp) + i;
4796                 fp->sb_id = fp->cl_id;
4797                 DP(NETIF_MSG_IFUP,
4798                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4799                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4800                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4801                               FP_SB_ID(fp));
4802                 bnx2x_update_fpsb_idx(fp);
4803         }
4804
4805         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4806                           DEF_SB_ID);
4807         bnx2x_update_dsb_idx(bp);
4808         bnx2x_update_coalesce(bp);
4809         bnx2x_init_rx_rings(bp);
4810         bnx2x_init_tx_ring(bp);
4811         bnx2x_init_sp_ring(bp);
4812         bnx2x_init_context(bp);
4813         bnx2x_init_internal(bp, load_code);
4814         bnx2x_init_ind_table(bp);
4815         bnx2x_stats_init(bp);
4816
4817         /* At this point, we are ready for interrupts */
4818         atomic_set(&bp->intr_sem, 0);
4819
4820         /* flush all before enabling interrupts */
4821         mb();
4822         mmiowb();
4823
4824         bnx2x_int_enable(bp);
4825 }
4826
4827 /* end of nic init */
4828
4829 /*
4830  * gzip service functions
4831  */
4832
4833 static int bnx2x_gunzip_init(struct bnx2x *bp)
4834 {
4835         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4836                                               &bp->gunzip_mapping);
4837         if (bp->gunzip_buf  == NULL)
4838                 goto gunzip_nomem1;
4839
4840         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4841         if (bp->strm  == NULL)
4842                 goto gunzip_nomem2;
4843
4844         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4845                                       GFP_KERNEL);
4846         if (bp->strm->workspace == NULL)
4847                 goto gunzip_nomem3;
4848
4849         return 0;
4850
4851 gunzip_nomem3:
4852         kfree(bp->strm);
4853         bp->strm = NULL;
4854
4855 gunzip_nomem2:
4856         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4857                             bp->gunzip_mapping);
4858         bp->gunzip_buf = NULL;
4859
4860 gunzip_nomem1:
4861         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4862                " decompression\n", bp->dev->name);
4863         return -ENOMEM;
4864 }
4865
4866 static void bnx2x_gunzip_end(struct bnx2x *bp)
4867 {
4868         kfree(bp->strm->workspace);
4869
4870         kfree(bp->strm);
4871         bp->strm = NULL;
4872
4873         if (bp->gunzip_buf) {
4874                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4875                                     bp->gunzip_mapping);
4876                 bp->gunzip_buf = NULL;
4877         }
4878 }
4879
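/*
 * Inflate a gzipped firmware image into bp->gunzip_buf.  The fixed gzip
 * header is 10 bytes (magic 0x1f 0x8b plus the deflate method byte); if
 * the FNAME flag is set, a NUL-terminated file name follows and is
 * skipped.  The negative window-bits argument asks zlib for a raw
 * deflate stream, i.e. no zlib header or checksum.
 */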
4880 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4881 {
4882         int n, rc;
4883
4884         /* check gzip header */
4885         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4886                 return -EINVAL;
4887
4888         n = 10;
4889
4890 #define FNAME                           0x8
4891
4892         if (zbuf[3] & FNAME)
4893                 while ((n < len) && (zbuf[n++] != 0));
4894
4895         bp->strm->next_in = zbuf + n;
4896         bp->strm->avail_in = len - n;
4897         bp->strm->next_out = bp->gunzip_buf;
4898         bp->strm->avail_out = FW_BUF_SIZE;
4899
4900         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4901         if (rc != Z_OK)
4902                 return rc;
4903
4904         rc = zlib_inflate(bp->strm, Z_FINISH);
4905         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4906                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4907                        bp->dev->name, bp->strm->msg);
4908
4909         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4910         if (bp->gunzip_outlen & 0x3)
4911                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4912                                     " gunzip_outlen (%d) not aligned\n",
4913                        bp->dev->name, bp->gunzip_outlen);
4914         bp->gunzip_outlen >>= 2;
4915
4916         zlib_inflateEnd(bp->strm);
4917
4918         if (rc == Z_STREAM_END)
4919                 return 0;
4920
4921         return rc;
4922 }
4923
4924 /* nic load/unload */
4925
4926 /*
4927  * General service functions
4928  */
4929
4930 /* send a NIG loopback debug packet */
4931 static void bnx2x_lb_pckt(struct bnx2x *bp)
4932 {
4933         u32 wb_write[3];
4934
4935         /* Ethernet source and destination addresses */
4936         wb_write[0] = 0x55555555;
4937         wb_write[1] = 0x55555555;
4938         wb_write[2] = 0x20;             /* SOP */
4939         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4940
4941         /* NON-IP protocol */
4942         wb_write[0] = 0x09000000;
4943         wb_write[1] = 0x55555555;
4944         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4945         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4946 }
4947
4948 /* Some of the internal memories are not directly readable
4949  * from the driver; to test them we send debug packets through
4950  * the loopback path and check the NIG/PRS packet counters.
4951  */
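/*
 * The test sequence: block the parser's neighbor inputs and its CFC
 * search credits, send one loopback packet and check that the NIG/PRS
 * counters see it; then, after a BRB/PRS reset, send 10 packets, restore
 * the CFC credit, drain the NIG EOP FIFO and verify the counters again.
 */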
4952 static int bnx2x_int_mem_test(struct bnx2x *bp)
4953 {
4954         int factor;
4955         int count, i;
4956         u32 val = 0;
4957
4958         if (CHIP_REV_IS_FPGA(bp))
4959                 factor = 120;
4960         else if (CHIP_REV_IS_EMUL(bp))
4961                 factor = 200;
4962         else
4963                 factor = 1;
4964
4965         DP(NETIF_MSG_HW, "start part1\n");
4966
4967         /* Disable inputs of parser neighbor blocks */
4968         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4969         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4970         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4971         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4972
4973         /*  Write 0 to parser credits for CFC search request */
4974         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4975
4976         /* send Ethernet packet */
4977         bnx2x_lb_pckt(bp);
4978
4979         /* TODO: should the NIG statistics be reset here? */
4980         /* Wait until NIG register shows 1 packet of size 0x10 */
4981         count = 1000 * factor;
4982         while (count) {
4983
4984                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4985                 val = *bnx2x_sp(bp, wb_data[0]);
4986                 if (val == 0x10)
4987                         break;
4988
4989                 msleep(10);
4990                 count--;
4991         }
4992         if (val != 0x10) {
4993                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4994                 return -1;
4995         }
4996
4997         /* Wait until PRS register shows 1 packet */
4998         count = 1000 * factor;
4999         while (count) {
5000                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5001                 if (val == 1)
5002                         break;
5003
5004                 msleep(10);
5005                 count--;
5006         }
5007         if (val != 0x1) {
5008                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5009                 return -2;
5010         }
5011
5012         /* Reset and init BRB, PRS */
5013         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5014         msleep(50);
5015         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5016         msleep(50);
5017         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5018         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5019
5020         DP(NETIF_MSG_HW, "part2\n");
5021
5022         /* Disable inputs of parser neighbor blocks */
5023         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5024         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5025         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5026         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5027
5028         /* Write 0 to parser credits for CFC search request */
5029         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5030
5031         /* send 10 Ethernet packets */
5032         for (i = 0; i < 10; i++)
5033                 bnx2x_lb_pckt(bp);
5034
5035         /* Wait until NIG register shows 10 + 1
5036            packets of size 11*0x10 = 0xb0 */
5037         count = 1000 * factor;
5038         while (count) {
5039
5040                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5041                 val = *bnx2x_sp(bp, wb_data[0]);
5042                 if (val == 0xb0)
5043                         break;
5044
5045                 msleep(10);
5046                 count--;
5047         }
5048         if (val != 0xb0) {
5049                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5050                 return -3;
5051         }
5052
5053         /* Wait until PRS register shows 2 packets */
5054         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5055         if (val != 2)
5056                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5057
5058         /* Write 1 to parser credits for CFC search request */
5059         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5060
5061         /* Wait until the PRS register shows 3 packets */
5062         msleep(10 * factor);
5063
5064         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5065         if (val != 3)
5066                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5067
5068         /* clear NIG EOP FIFO */
5069         for (i = 0; i < 11; i++)
5070                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5071         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5072         if (val != 1) {
5073                 BNX2X_ERR("clear of NIG failed\n");
5074                 return -4;
5075         }
5076
5077         /* Reset and init BRB, PRS, NIG */
5078         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5079         msleep(50);
5080         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5081         msleep(50);
5082         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5083         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5084 #ifndef BCM_ISCSI
5085         /* set NIC mode */
5086         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5087 #endif
5088
5089         /* Enable inputs of parser neighbor blocks */
5090         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5091         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5092         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5093         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5094
5095         DP(NETIF_MSG_HW, "done\n");
5096
5097         return 0; /* OK */
5098 }
5099
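/*
 * Unmask (a mask value of 0 enables the attention) the interrupt lines
 * of the HW blocks; the commented-out blocks and the noted PBF bits
 * stay masked.
 */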
5100 static void enable_blocks_attention(struct bnx2x *bp)
5101 {
5102         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5103         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5104         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5105         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5106         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5107         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5108         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5109         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5110         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5111 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5112 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5113         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5114         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5115         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5116 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5117 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5118         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5119         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5120         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5121         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5122 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5123 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5124         if (CHIP_REV_IS_FPGA(bp))
5125                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5126         else
5127                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5128         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5129         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5130         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5131 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5132 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5133         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5134         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5135 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5136         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5137 }
5138
5139
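/*
 * First stage of HW init, performed once per chip by the function that
 * gets FW_MSG_CODE_DRV_LOAD_COMMON: global resets, common block init,
 * PXP and endianness setup, storm internal memory zeroing, CFC/CDU init
 * and the internal memory self test on a cold E1 boot.
 */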
5140 static int bnx2x_init_common(struct bnx2x *bp)
5141 {
5142         u32 val, i;
5143
5144         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5145
5146         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5147         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5148
5149         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5150         if (CHIP_IS_E1H(bp))
5151                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5152
5153         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5154         msleep(30);
5155         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5156
5157         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5158         if (CHIP_IS_E1(bp)) {
5159                 /* enable HW interrupt from PXP on USDM overflow
5160                    bit 16 on INT_MASK_0 */
5161                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5162         }
5163
5164         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5165         bnx2x_init_pxp(bp);
5166
5167 #ifdef __BIG_ENDIAN
5168         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5169         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5170         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5171         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5172         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5173
5174 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5175         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5176         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5177         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5178         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5179 #endif
5180
5181         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5182 #ifdef BCM_ISCSI
5183         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5184         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5185         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5186 #endif
5187
5188         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5189                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5190
5191         /* let the HW do its magic ... */
5192         msleep(100);
5193         /* finish PXP init */
5194         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5195         if (val != 1) {
5196                 BNX2X_ERR("PXP2 CFG failed\n");
5197                 return -EBUSY;
5198         }
5199         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5200         if (val != 1) {
5201                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5202                 return -EBUSY;
5203         }
5204
5205         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5206         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5207
5208         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5209
5210         /* clean the DMAE memory */
5211         bp->dmae_ready = 1;
5212         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5213
5214         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5215         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5216         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5217         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5218
5219         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5220         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5221         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5222         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5223
5224         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5225         /* soft reset pulse */
5226         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5227         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5228
5229 #ifdef BCM_ISCSI
5230         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5231 #endif
5232
5233         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5234         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5235         if (!CHIP_REV_IS_SLOW(bp)) {
5236                 /* enable hw interrupt from doorbell Q */
5237                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5238         }
5239
5240         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5241         if (CHIP_REV_IS_SLOW(bp)) {
5242                 /* fix for emulation and FPGA for no pause */
5243                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5244                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5245                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5246                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5247         }
5248
5249         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5250         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5251         /* set NIC mode */
5252         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5253         if (CHIP_IS_E1H(bp))
5254                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5255
5256         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5257         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5258         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5259         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5260
5261         if (CHIP_IS_E1H(bp)) {
5262                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5263                                 STORM_INTMEM_SIZE_E1H/2);
5264                 bnx2x_init_fill(bp,
5265                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5266                                 0, STORM_INTMEM_SIZE_E1H/2);
5267                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5268                                 STORM_INTMEM_SIZE_E1H/2);
5269                 bnx2x_init_fill(bp,
5270                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5271                                 0, STORM_INTMEM_SIZE_E1H/2);
5272                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5273                                 STORM_INTMEM_SIZE_E1H/2);
5274                 bnx2x_init_fill(bp,
5275                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5276                                 0, STORM_INTMEM_SIZE_E1H/2);
5277                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5278                                 STORM_INTMEM_SIZE_E1H/2);
5279                 bnx2x_init_fill(bp,
5280                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5281                                 0, STORM_INTMEM_SIZE_E1H/2);
5282         } else { /* E1 */
5283                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5284                                 STORM_INTMEM_SIZE_E1);
5285                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5286                                 STORM_INTMEM_SIZE_E1);
5287                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5288                                 STORM_INTMEM_SIZE_E1);
5289                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5290                                 STORM_INTMEM_SIZE_E1);
5291         }
5292
5293         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5294         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5295         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5296         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5297
5298         /* sync semi rtc */
5299         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5300                0x80000000);
5301         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5302                0x80000000);
5303
5304         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5305         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5306         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5307
5308         REG_WR(bp, SRC_REG_SOFT_RST, 1);
        /* TODO: replace with something meaningful */
        for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
                REG_WR(bp, i, 0xc0cac01a);
5313         if (CHIP_IS_E1H(bp))
5314                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5315         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5316
        if (sizeof(union cdu_context) != 1024)
                /* we currently assume that a context is 1024 bytes */
                printk(KERN_ALERT PFX "please adjust the size of"
                       " cdu_context (%ld bytes)\n",
                       (long)sizeof(union cdu_context));
5321
5322         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5323         val = (4 << 24) + (0 << 12) + 1024;
5324         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5325         if (CHIP_IS_E1(bp)) {
                /* !!! fix PXP client credit until the init values (Excel) are updated */
5327                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5328                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5329         }
5330
5331         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5332         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5333
5334         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5335         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5336
5337         /* PXPCS COMMON comes here */
5338         /* Reset PCIE errors for debug */
5339         REG_WR(bp, 0x2814, 0xffffffff);
5340         REG_WR(bp, 0x3820, 0xffffffff);
5341
5342         /* EMAC0 COMMON comes here */
5343         /* EMAC1 COMMON comes here */
5344         /* DBU COMMON comes here */
5345         /* DBG COMMON comes here */
5346
5347         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5348         if (CHIP_IS_E1H(bp)) {
5349                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5350                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5351         }
5352
5353         if (CHIP_REV_IS_SLOW(bp))
5354                 msleep(200);
5355
5356         /* finish CFC init */
5357         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5358         if (val != 1) {
5359                 BNX2X_ERR("CFC LL_INIT failed\n");
5360                 return -EBUSY;
5361         }
5362         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5363         if (val != 1) {
5364                 BNX2X_ERR("CFC AC_INIT failed\n");
5365                 return -EBUSY;
5366         }
5367         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5368         if (val != 1) {
5369                 BNX2X_ERR("CFC CAM_INIT failed\n");
5370                 return -EBUSY;
5371         }
5372         REG_WR(bp, CFC_REG_DEBUG0, 0);
5373
        /* read a NIG statistic to see if this is
           the first load since power-up */
5376         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5377         val = *bnx2x_sp(bp, wb_data[0]);
5378
5379         /* do internal memory self test */
5380         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5381                 BNX2X_ERR("internal mem self test failed\n");
5382                 return -EBUSY;
5383         }
5384
5385         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5386         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5387         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5388                 /* Fan failure is indicated by SPIO 5 */
5389                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5390                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5391
5392                 /* set to active low mode */
5393                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5394                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5395                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5396                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5397
5398                 /* enable interrupt to signal the IGU */
5399                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5400                 val |= (1 << MISC_REGISTERS_SPIO_5);
5401                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5402                 break;
5403
5404         default:
5405                 break;
5406         }
5407
5408         /* clear PXP2 attentions */
5409         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5410
5411         enable_blocks_attention(bp);
5412
5413         if (!BP_NOMCP(bp)) {
5414                 bnx2x_acquire_phy_lock(bp);
5415                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5416                 bnx2x_release_phy_lock(bp);
5417         } else
5418                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5419
5420         return 0;
5421 }
5422
5423 static int bnx2x_init_port(struct bnx2x *bp)
5424 {
5425         int port = BP_PORT(bp);
5426         u32 val;
5427
5428         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5429
5430         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5431
5432         /* Port PXP comes here */
5433         /* Port PXP2 comes here */
5434 #ifdef BCM_ISCSI
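        /* Note: this BCM_ISCSI block references 'i', 'wb_write' and
         * 'func', which are not declared in this function, and uses the
         * ONCHIP_ADDR macros defined only further down in this file, so
         * it does not compile as-is when BCM_ISCSI is defined.
         */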
5435         /* Port0  1
5436          * Port1  385 */
5437         i++;
5438         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5439         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5440         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5441         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5442
5443         /* Port0  2
5444          * Port1  386 */
5445         i++;
5446         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5447         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5448         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5449         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5450
5451         /* Port0  3
5452          * Port1  387 */
5453         i++;
5454         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5455         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5456         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5457         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5458 #endif
5459         /* Port CMs come here */
5460
5461         /* Port QM comes here */
5462 #ifdef BCM_ISCSI
5463         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5464         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5465
5466         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5467                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5468 #endif
5469         /* Port DQ comes here */
5470         /* Port BRB1 comes here */
5471         /* Port PRS comes here */
5472         /* Port TSDM comes here */
5473         /* Port CSDM comes here */
5474         /* Port USDM comes here */
5475         /* Port XSDM comes here */
5476         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5477                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5478         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5479                              port ? USEM_PORT1_END : USEM_PORT0_END);
5480         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5481                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5482         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5483                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5484         /* Port UPB comes here */
5485         /* Port XPB comes here */
5486
5487         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5488                              port ? PBF_PORT1_END : PBF_PORT0_END);
5489
        /* configure PBF to work without PAUSE, MTU 9000 */
5491         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5492
5493         /* update threshold */
5494         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5495         /* update init credit */
5496         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
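        /* threshold and credit are in 16-byte units: 9040/16 = 565 units,
         * presumably a 9000-byte MTU plus overhead; the "+ 553 - 22"
         * adjustment is HW-specific and taken as-is */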
5497
5498         /* probe changes */
5499         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5500         msleep(5);
5501         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5502
5503 #ifdef BCM_ISCSI
5504         /* tell the searcher where the T2 table is */
5505         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5506
5507         wb_write[0] = U64_LO(bp->t2_mapping);
5508         wb_write[1] = U64_HI(bp->t2_mapping);
5509         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5510         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5511         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5512         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5513
5514         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5515         /* Port SRCH comes here */
5516 #endif
5517         /* Port CDU comes here */
5518         /* Port CFC comes here */
5519
5520         if (CHIP_IS_E1(bp)) {
5521                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5522                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5523         }
5524         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5525                              port ? HC_PORT1_END : HC_PORT0_END);
5526
5527         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5528                                     MISC_AEU_PORT0_START,
5529                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5530         /* init aeu_mask_attn_func_0/1:
5531          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5532          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5533          *             bits 4-7 are used for "per vn group attention" */
5534         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5535                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5536
5537         /* Port PXPCS comes here */
5538         /* Port EMAC0 comes here */
5539         /* Port EMAC1 comes here */
5540         /* Port DBU comes here */
5541         /* Port DBG comes here */
5542         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5543                              port ? NIG_PORT1_END : NIG_PORT0_END);
5544
5545         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5546
5547         if (CHIP_IS_E1H(bp)) {
5548                 u32 wsum;
5549                 struct cmng_struct_per_port m_cmng_port;
5550                 int vn;
5551
5552                 /* 0x2 disable e1hov, 0x1 enable */
5553                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5554                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5555
                /* Init RATE SHAPING and FAIRNESS contexts.
                   Initialize as if there is a 10G link. */
5558                 wsum = bnx2x_calc_vn_wsum(bp);
5559                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5560                 if (IS_E1HMF(bp))
5561                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5562                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5563                                         wsum, 10000, &m_cmng_port);
5564         }
5565
5566         /* Port MCP comes here */
5567         /* Port DMAE comes here */
5568
5569         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5570         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5571         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5572                 /* add SPIO 5 to group 0 */
5573                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5574                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5575                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5576                 break;
5577
5578         default:
5579                 break;
5580         }
5581
5582         bnx2x__link_reset(bp);
5583
5584         return 0;
5585 }
5586
5587 #define ILT_PER_FUNC            (768/2)
5588 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
/* The physical address is shifted right 12 bits and has a 1 (valid bit)
   added at the 53rd bit; since this is a wide register(TM),
   it is split into two 32-bit writes.
 */
5594 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5595 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5596 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5597 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
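/*
 * A worked example (illustrative values only): for a DMA address
 * addr = 0x0000123456789000ULL,
 *
 *	ONCHIP_ADDR1(addr) == 0x23456789   (bits 43..12 of addr)
 *	ONCHIP_ADDR2(addr) == 0x100001     (valid bit | bits 63..44)
 *	PXP_ONE_ILT(5)     == 0x1405       (first == last == ILT line 5)
 */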
5598
5599 #define CNIC_ILT_LINES          0
5600
5601 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5602 {
5603         int reg;
5604
5605         if (CHIP_IS_E1H(bp))
5606                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5607         else /* E1 */
5608                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5609
5610         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5611 }
5612
5613 static int bnx2x_init_func(struct bnx2x *bp)
5614 {
5615         int port = BP_PORT(bp);
5616         int func = BP_FUNC(bp);
5617         int i;
5618
5619         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5620
5621         i = FUNC_ILT_BASE(func);
5622
5623         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5624         if (CHIP_IS_E1H(bp)) {
5625                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5626                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5627         } else /* E1 */
5628                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5629                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5630
5631
5632         if (CHIP_IS_E1H(bp)) {
5633                 for (i = 0; i < 9; i++)
5634                         bnx2x_init_block(bp,
5635                                          cm_start[func][i], cm_end[func][i]);
5636
5637                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5638                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5639         }
5640
5641         /* HC init per function */
5642         if (CHIP_IS_E1H(bp)) {
5643                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5644
5645                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5646                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5647         }
5648         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5649
5650         if (CHIP_IS_E1H(bp))
5651                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5652
5653         /* Reset PCIE errors for debug */
5654         REG_WR(bp, 0x2114, 0xffffffff);
5655         REG_WR(bp, 0x2120, 0xffffffff);
5656
5657         return 0;
5658 }
5659
5660 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5661 {
5662         int i, rc = 0;
5663
5664         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5665            BP_FUNC(bp), load_code);
5666
5667         bp->dmae_ready = 0;
5668         mutex_init(&bp->dmae_mutex);
5669         bnx2x_gunzip_init(bp);
5670
5671         switch (load_code) {
5672         case FW_MSG_CODE_DRV_LOAD_COMMON:
5673                 rc = bnx2x_init_common(bp);
5674                 if (rc)
5675                         goto init_hw_err;
5676                 /* no break */
5677
5678         case FW_MSG_CODE_DRV_LOAD_PORT:
5679                 bp->dmae_ready = 1;
5680                 rc = bnx2x_init_port(bp);
5681                 if (rc)
5682                         goto init_hw_err;
5683                 /* no break */
5684
5685         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5686                 bp->dmae_ready = 1;
5687                 rc = bnx2x_init_func(bp);
5688                 if (rc)
5689                         goto init_hw_err;
5690                 break;
5691
5692         default:
5693                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5694                 break;
5695         }
5696
5697         if (!BP_NOMCP(bp)) {
5698                 int func = BP_FUNC(bp);
5699
5700                 bp->fw_drv_pulse_wr_seq =
5701                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5702                                  DRV_PULSE_SEQ_MASK);
5703                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5704                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5705                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5706         } else
5707                 bp->func_stx = 0;
5708
5709         /* this needs to be done before gunzip end */
5710         bnx2x_zero_def_sb(bp);
5711         for_each_queue(bp, i)
5712                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5713
5714 init_hw_err:
5715         bnx2x_gunzip_end(bp);
5716
5717         return rc;
5718 }
5719
5720 /* send the MCP a request, block until there is a reply */
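/* A sketch of the handshake as implemented below: the driver writes
 * (command | seq) into its function's drv_mb_header in shared memory,
 * then polls fw_mb_header until the firmware echoes the same sequence
 * number; the FW_MSG_CODE_MASK bits of the echoed word carry the
 * response code, and 0 is returned if the firmware never answers.
 */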
5721 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5722 {
5723         int func = BP_FUNC(bp);
5724         u32 seq = ++bp->fw_seq;
5725         u32 rc = 0;
5726         u32 cnt = 1;
5727         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5728
5729         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5730         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5731
5732         do {
                /* let the FW do its magic ... */
5734                 msleep(delay);
5735
5736                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5737
                /* Give the FW up to 2 seconds (200 * 10ms) */
5739         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5740
5741         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5742            cnt*delay, rc, seq);
5743
5744         /* is this a reply to our command? */
5745         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5746                 rc &= FW_MSG_CODE_MASK;
5747
5748         } else {
5749                 /* FW BUG! */
5750                 BNX2X_ERR("FW failed to respond!\n");
5751                 bnx2x_fw_dump(bp);
5752                 rc = 0;
5753         }
5754
5755         return rc;
5756 }
5757
5758 static void bnx2x_free_mem(struct bnx2x *bp)
5759 {
5760
5761 #define BNX2X_PCI_FREE(x, y, size) \
5762         do { \
5763                 if (x) { \
5764                         pci_free_consistent(bp->pdev, size, x, y); \
5765                         x = NULL; \
5766                         y = 0; \
5767                 } \
5768         } while (0)
5769
5770 #define BNX2X_FREE(x) \
5771         do { \
5772                 if (x) { \
5773                         vfree(x); \
5774                         x = NULL; \
5775                 } \
5776         } while (0)
5777
5778         int i;
5779
5780         /* fastpath */
5781         for_each_queue(bp, i) {
5782
5783                 /* Status blocks */
5784                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5785                                bnx2x_fp(bp, i, status_blk_mapping),
5786                                sizeof(struct host_status_block) +
5787                                sizeof(struct eth_tx_db_data));
5788
5789                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5790                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5791                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5792                                bnx2x_fp(bp, i, tx_desc_mapping),
5793                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5794
5795                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5796                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5797                                bnx2x_fp(bp, i, rx_desc_mapping),
5798                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5799
5800                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5801                                bnx2x_fp(bp, i, rx_comp_mapping),
5802                                sizeof(struct eth_fast_path_rx_cqe) *
5803                                NUM_RCQ_BD);
5804
5805                 /* SGE ring */
5806                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5807                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5808                                bnx2x_fp(bp, i, rx_sge_mapping),
5809                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5810         }
5811         /* end of fastpath */
5812
5813         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5814                        sizeof(struct host_def_status_block));
5815
5816         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5817                        sizeof(struct bnx2x_slowpath));
5818
5819 #ifdef BCM_ISCSI
5820         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5821         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5822         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5823         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5824 #endif
5825         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5826
5827 #undef BNX2X_PCI_FREE
#undef BNX2X_FREE
5829 }
5830
5831 static int bnx2x_alloc_mem(struct bnx2x *bp)
5832 {
5833
5834 #define BNX2X_PCI_ALLOC(x, y, size) \
5835         do { \
5836                 x = pci_alloc_consistent(bp->pdev, size, y); \
5837                 if (x == NULL) \
5838                         goto alloc_mem_err; \
5839                 memset(x, 0, size); \
5840         } while (0)
5841
5842 #define BNX2X_ALLOC(x, size) \
5843         do { \
5844                 x = vmalloc(size); \
5845                 if (x == NULL) \
5846                         goto alloc_mem_err; \
5847                 memset(x, 0, size); \
5848         } while (0)
5849
5850         int i;
5851
5852         /* fastpath */
5853         for_each_queue(bp, i) {
5854                 bnx2x_fp(bp, i, bp) = bp;
5855
5856                 /* Status blocks */
5857                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5858                                 &bnx2x_fp(bp, i, status_blk_mapping),
5859                                 sizeof(struct host_status_block) +
5860                                 sizeof(struct eth_tx_db_data));
5861
5862                 bnx2x_fp(bp, i, hw_tx_prods) =
5863                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5864
5865                 bnx2x_fp(bp, i, tx_prods_mapping) =
5866                                 bnx2x_fp(bp, i, status_blk_mapping) +
5867                                 sizeof(struct host_status_block);
5868
5869                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5870                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5871                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5872                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5873                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5874                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5875
5876                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5877                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5878                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5879                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5880                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5881
5882                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5883                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5884                                 sizeof(struct eth_fast_path_rx_cqe) *
5885                                 NUM_RCQ_BD);
5886
5887                 /* SGE ring */
5888                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5889                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5890                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5891                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5892                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5893         }
5894         /* end of fastpath */
5895
5896         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5897                         sizeof(struct host_def_status_block));
5898
5899         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5900                         sizeof(struct bnx2x_slowpath));
5901
5902 #ifdef BCM_ISCSI
5903         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5904
5905         /* Initialize T1 */
5906         for (i = 0; i < 64*1024; i += 64) {
5907                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5908                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5909         }
5910
        /* allocate the searcher T2 table: 1/4 of the T1 allocation
           (T2 is not entered into the ILT) */
5914         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5915
        /* Initialize T2 */
        for (i = 0; i < 16*1024; i += 64)
                *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5919
5920         /* now fixup the last line in the block to point to the next block */
5921         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5922
        /* Timer block array (MAX_CONN*8 bytes), physically uncached;
           for now sized for 1024 connections */
5924         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5925
5926         /* QM queues (128*MAX_CONN) */
5927         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5928 #endif
5929
5930         /* Slow path ring */
5931         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5932
5933         return 0;
5934
5935 alloc_mem_err:
5936         bnx2x_free_mem(bp);
5937         return -ENOMEM;
5938
5939 #undef BNX2X_PCI_ALLOC
5940 #undef BNX2X_ALLOC
5941 }
5942
5943 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5944 {
5945         int i;
5946
5947         for_each_queue(bp, i) {
5948                 struct bnx2x_fastpath *fp = &bp->fp[i];
5949
5950                 u16 bd_cons = fp->tx_bd_cons;
5951                 u16 sw_prod = fp->tx_pkt_prod;
5952                 u16 sw_cons = fp->tx_pkt_cons;
5953
5954                 while (sw_cons != sw_prod) {
5955                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5956                         sw_cons++;
5957                 }
5958         }
5959 }
5960
5961 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5962 {
5963         int i, j;
5964
5965         for_each_queue(bp, j) {
5966                 struct bnx2x_fastpath *fp = &bp->fp[j];
5967
5968                 for (i = 0; i < NUM_RX_BD; i++) {
5969                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5970                         struct sk_buff *skb = rx_buf->skb;
5971
5972                         if (skb == NULL)
5973                                 continue;
5974
5975                         pci_unmap_single(bp->pdev,
5976                                          pci_unmap_addr(rx_buf, mapping),
5977                                          bp->rx_buf_size,
5978                                          PCI_DMA_FROMDEVICE);
5979
5980                         rx_buf->skb = NULL;
5981                         dev_kfree_skb(skb);
5982                 }
5983                 if (!fp->disable_tpa)
5984                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5985                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5986                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5987         }
5988 }
5989
5990 static void bnx2x_free_skbs(struct bnx2x *bp)
5991 {
5992         bnx2x_free_tx_skbs(bp);
5993         bnx2x_free_rx_skbs(bp);
5994 }
5995
5996 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5997 {
5998         int i, offset = 1;
5999
6000         free_irq(bp->msix_table[0].vector, bp->dev);
6001         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6002            bp->msix_table[0].vector);
6003
6004         for_each_queue(bp, i) {
6005                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6006                    "state %x\n", i, bp->msix_table[i + offset].vector,
6007                    bnx2x_fp(bp, i, state));
6008
6009                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6010                         BNX2X_ERR("IRQ of fp #%d being freed while "
6011                                   "state != closed\n", i);
6012
6013                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6014         }
6015 }
6016
6017 static void bnx2x_free_irq(struct bnx2x *bp)
6018 {
6019         if (bp->flags & USING_MSIX_FLAG) {
6020                 bnx2x_free_msix_irqs(bp);
6021                 pci_disable_msix(bp->pdev);
6022                 bp->flags &= ~USING_MSIX_FLAG;
6023
6024         } else
6025                 free_irq(bp->pdev->irq, bp->dev);
6026 }
6027
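/* MSI-X vector layout: entry 0 carries the slowpath (default status
 * block) interrupt; entries 1..num_queues carry the fastpath
 * interrupts, mapped to IGU vectors starting at BP_L_ID(bp) + 1.
 */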
6028 static int bnx2x_enable_msix(struct bnx2x *bp)
6029 {
6030         int i, rc, offset;
6031
6032         bp->msix_table[0].entry = 0;
6033         offset = 1;
6034         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6035
6036         for_each_queue(bp, i) {
6037                 int igu_vec = offset + i + BP_L_ID(bp);
6038
6039                 bp->msix_table[i + offset].entry = igu_vec;
6040                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6041                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6042         }
6043
6044         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6045                              bp->num_queues + offset);
6046         if (rc) {
6047                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6048                 return -1;
6049         }
6050         bp->flags |= USING_MSIX_FLAG;
6051
6052         return 0;
6053 }
6054
6055 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6056 {
6057         int i, rc, offset = 1;
6058
6059         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6060                          bp->dev->name, bp->dev);
6061         if (rc) {
6062                 BNX2X_ERR("request sp irq failed\n");
6063                 return -EBUSY;
6064         }
6065
6066         for_each_queue(bp, i) {
6067                 rc = request_irq(bp->msix_table[i + offset].vector,
6068                                  bnx2x_msix_fp_int, 0,
6069                                  bp->dev->name, &bp->fp[i]);
6070                 if (rc) {
6071                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6072                                   i + offset, -rc);
6073                         bnx2x_free_msix_irqs(bp);
6074                         return -EBUSY;
6075                 }
6076
6077                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6078         }
6079
6080         return 0;
6081 }
6082
6083 static int bnx2x_req_irq(struct bnx2x *bp)
6084 {
6085         int rc;
6086
6087         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6088                          bp->dev->name, bp->dev);
6089         if (!rc)
6090                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6091
6092         return rc;
6093 }
6094
6095 static void bnx2x_napi_enable(struct bnx2x *bp)
6096 {
6097         int i;
6098
6099         for_each_queue(bp, i)
6100                 napi_enable(&bnx2x_fp(bp, i, napi));
6101 }
6102
6103 static void bnx2x_napi_disable(struct bnx2x *bp)
6104 {
6105         int i;
6106
6107         for_each_queue(bp, i)
6108                 napi_disable(&bnx2x_fp(bp, i, napi));
6109 }
6110
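/* intr_sem acts as a disable count: interrupts and NAPI are re-enabled
 * only when the last disabler releases it (atomic_dec_and_test() below).
 */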
6111 static void bnx2x_netif_start(struct bnx2x *bp)
6112 {
6113         if (atomic_dec_and_test(&bp->intr_sem)) {
6114                 if (netif_running(bp->dev)) {
6115                         if (bp->state == BNX2X_STATE_OPEN)
6116                                 netif_wake_queue(bp->dev);
6117                         bnx2x_napi_enable(bp);
6118                         bnx2x_int_enable(bp);
6119                 }
6120         }
6121 }
6122
6123 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6124 {
6125         bnx2x_int_disable_sync(bp, disable_hw);
6126         if (netif_running(bp->dev)) {
6127                 bnx2x_napi_disable(bp);
6128                 netif_tx_disable(bp->dev);
6129                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6130         }
6131 }
6132
6133 /*
6134  * Init service functions
6135  */
6136
6137 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6138 {
6139         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6140         int port = BP_PORT(bp);
6141
6142         /* CAM allocation
6143          * unicasts 0-31:port0 32-63:port1
6144          * multicast 64-127:port0 128-191:port1
6145          */
6146         config->hdr.length_6b = 2;
6147         config->hdr.offset = port ? 31 : 0;
6148         config->hdr.client_id = BP_CL_ID(bp);
6149         config->hdr.reserved1 = 0;
6150
6151         /* primary MAC */
6152         config->config_table[0].cam_entry.msb_mac_addr =
6153                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6154         config->config_table[0].cam_entry.middle_mac_addr =
6155                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6156         config->config_table[0].cam_entry.lsb_mac_addr =
6157                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
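        /* e.g. for MAC 00:11:22:33:44:55 on a little-endian host this
           yields msb 0x0011, middle 0x2233 and lsb 0x4455 */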
6158         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6159         if (set)
6160                 config->config_table[0].target_table_entry.flags = 0;
6161         else
6162                 CAM_INVALIDATE(config->config_table[0]);
6163         config->config_table[0].target_table_entry.client_id = 0;
6164         config->config_table[0].target_table_entry.vlan_id = 0;
6165
6166         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6167            (set ? "setting" : "clearing"),
6168            config->config_table[0].cam_entry.msb_mac_addr,
6169            config->config_table[0].cam_entry.middle_mac_addr,
6170            config->config_table[0].cam_entry.lsb_mac_addr);
6171
6172         /* broadcast */
6173         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6174         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6175         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6176         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6177         if (set)
6178                 config->config_table[1].target_table_entry.flags =
6179                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6180         else
6181                 CAM_INVALIDATE(config->config_table[1]);
6182         config->config_table[1].target_table_entry.client_id = 0;
6183         config->config_table[1].target_table_entry.vlan_id = 0;
6184
6185         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6186                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6187                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6188 }
6189
6190 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6191 {
6192         struct mac_configuration_cmd_e1h *config =
6193                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6194
6195         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6196                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6197                 return;
6198         }
6199
6200         /* CAM allocation for E1H
6201          * unicasts: by func number
6202          * multicast: 20+FUNC*20, 20 each
6203          */
6204         config->hdr.length_6b = 1;
6205         config->hdr.offset = BP_FUNC(bp);
6206         config->hdr.client_id = BP_CL_ID(bp);
6207         config->hdr.reserved1 = 0;
6208
6209         /* primary MAC */
6210         config->config_table[0].msb_mac_addr =
6211                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6212         config->config_table[0].middle_mac_addr =
6213                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6214         config->config_table[0].lsb_mac_addr =
6215                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6216         config->config_table[0].client_id = BP_L_ID(bp);
6217         config->config_table[0].vlan_id = 0;
6218         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6219         if (set)
6220                 config->config_table[0].flags = BP_PORT(bp);
6221         else
6222                 config->config_table[0].flags =
6223                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6224
6225         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6226            (set ? "setting" : "clearing"),
6227            config->config_table[0].msb_mac_addr,
6228            config->config_table[0].middle_mac_addr,
6229            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6230
6231         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6232                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6233                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6234 }
6235
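/* Poll *state_p, which is updated from ramrod completions by
 * bnx2x_sp_event(), for up to ~500ms (500 * 1ms); with 'poll' set the
 * completions are reaped directly via bnx2x_rx_int() instead of
 * relying on interrupts.
 */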
6236 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6237                              int *state_p, int poll)
6238 {
6239         /* can take a while if any port is running */
6240         int cnt = 500;
6241
6242         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6243            poll ? "polling" : "waiting", state, idx);
6244
6245         might_sleep();
6246         while (cnt--) {
6247                 if (poll) {
6248                         bnx2x_rx_int(bp->fp, 10);
                        /* if the index is non-zero, the reply for
                         * some commands will arrive on the
                         * non-default queue
                         */
6253                         if (idx)
6254                                 bnx2x_rx_int(&bp->fp[idx], 10);
6255                 }
6256
6257                 mb(); /* state is changed by bnx2x_sp_event() */
6258                 if (*state_p == state)
6259                         return 0;
6260
6261                 msleep(1);
6262         }
6263
6264         /* timeout! */
6265         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6266                   poll ? "polling" : "waiting", state, idx);
6267 #ifdef BNX2X_STOP_ON_ERROR
6268         bnx2x_panic();
6269 #endif
6270
6271         return -EBUSY;
6272 }
6273
6274 static int bnx2x_setup_leading(struct bnx2x *bp)
6275 {
6276         int rc;
6277
6278         /* reset IGU state */
6279         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6280
6281         /* SETUP ramrod */
6282         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6283
6284         /* Wait for completion */
6285         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6286
6287         return rc;
6288 }
6289
6290 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6291 {
6292         /* reset IGU state */
6293         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6294
6295         /* SETUP ramrod */
6296         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6297         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6298
6299         /* Wait for completion */
6300         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6301                                  &(bp->fp[index].state), 0);
6302 }
6303
6304 static int bnx2x_poll(struct napi_struct *napi, int budget);
6305 static void bnx2x_set_rx_mode(struct net_device *dev);
6306
6307 /* must be called with rtnl_lock */
6308 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6309 {
6310         u32 load_code;
6311         int i, rc;
6312 #ifdef BNX2X_STOP_ON_ERROR
6313         if (unlikely(bp->panic))
6314                 return -EPERM;
6315 #endif
6316
6317         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6318
        /* Send the LOAD_REQUEST command to the MCP.
           The reply indicates the type of LOAD: if this is the first
           port to be initialized, the common blocks must be initialized
           as well; otherwise they are skipped.
        */
6324         if (!BP_NOMCP(bp)) {
6325                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6326                 if (!load_code) {
6327                         BNX2X_ERR("MCP response failure, aborting\n");
6328                         return -EBUSY;
6329                 }
6330                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6331                         return -EBUSY; /* other port in diagnostic mode */
6332
6333         } else {
6334                 int port = BP_PORT(bp);
6335
6336                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6337                    load_count[0], load_count[1], load_count[2]);
6338                 load_count[0]++;
6339                 load_count[1 + port]++;
6340                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6341                    load_count[0], load_count[1], load_count[2]);
6342                 if (load_count[0] == 1)
6343                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6344                 else if (load_count[1 + port] == 1)
6345                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6346                 else
6347                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6348         }
6349
6350         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6351             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6352                 bp->port.pmf = 1;
6353         else
6354                 bp->port.pmf = 0;
6355         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
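        /* the first function loaded on a port (LOAD_COMMON or LOAD_PORT)
         * becomes the PMF - the function in charge of link handling and
         * port statistics for that port */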
6356
        /* if we can't use MSI-X we only need one fp,
         * so try to enable MSI-X with the requested number of fp's
         * and fall back to INT#A with a single fp
         */
6361         if (use_inta) {
6362                 bp->num_queues = 1;
6363
6364         } else {
6365                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6366                         /* user requested number */
6367                         bp->num_queues = use_multi;
6368
6369                 else if (use_multi)
6370                         bp->num_queues = min_t(u32, num_online_cpus(),
6371                                                BP_MAX_QUEUES(bp));
6372                 else
6373                         bp->num_queues = 1;
6374
6375                 if (bnx2x_enable_msix(bp)) {
6376                         /* failed to enable MSI-X */
6377                         bp->num_queues = 1;
6378                         if (use_multi)
6379                                 BNX2X_ERR("Multi requested but failed"
6380                                           " to enable MSI-X\n");
6381                 }
6382         }
6383         DP(NETIF_MSG_IFUP,
6384            "set number of queues to %d\n", bp->num_queues);
6385
6386         if (bnx2x_alloc_mem(bp))
6387                 return -ENOMEM;
6388
6389         for_each_queue(bp, i)
6390                 bnx2x_fp(bp, i, disable_tpa) =
6391                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6392
6393         if (bp->flags & USING_MSIX_FLAG) {
6394                 rc = bnx2x_req_msix_irqs(bp);
6395                 if (rc) {
6396                         pci_disable_msix(bp->pdev);
6397                         goto load_error;
6398                 }
6399         } else {
6400                 bnx2x_ack_int(bp);
6401                 rc = bnx2x_req_irq(bp);
6402                 if (rc) {
6403                         BNX2X_ERR("IRQ request failed, aborting\n");
6404                         goto load_error;
6405                 }
6406         }
6407
6408         for_each_queue(bp, i)
6409                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6410                                bnx2x_poll, 128);
6411
6412         /* Initialize HW */
6413         rc = bnx2x_init_hw(bp, load_code);
6414         if (rc) {
6415                 BNX2X_ERR("HW init failed, aborting\n");
6416                 goto load_int_disable;
6417         }
6418
6419         /* Setup NIC internals and enable interrupts */
6420         bnx2x_nic_init(bp, load_code);
6421
6422         /* Send LOAD_DONE command to MCP */
6423         if (!BP_NOMCP(bp)) {
6424                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6425                 if (!load_code) {
6426                         BNX2X_ERR("MCP response failure, aborting\n");
6427                         rc = -EBUSY;
6428                         goto load_rings_free;
6429                 }
6430         }
6431
6432         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6433
6434         rc = bnx2x_setup_leading(bp);
6435         if (rc) {
6436                 BNX2X_ERR("Setup leading failed!\n");
6437                 goto load_netif_stop;
6438         }
6439
6440         if (CHIP_IS_E1H(bp))
6441                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6442                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6443                         bp->state = BNX2X_STATE_DISABLED;
6444                 }
6445
6446         if (bp->state == BNX2X_STATE_OPEN)
6447                 for_each_nondefault_queue(bp, i) {
6448                         rc = bnx2x_setup_multi(bp, i);
6449                         if (rc)
6450                                 goto load_netif_stop;
6451                 }
6452
6453         if (CHIP_IS_E1(bp))
6454                 bnx2x_set_mac_addr_e1(bp, 1);
6455         else
6456                 bnx2x_set_mac_addr_e1h(bp, 1);
6457
6458         if (bp->port.pmf)
6459                 bnx2x_initial_phy_init(bp);
6460
6461         /* Start fast path */
6462         switch (load_mode) {
6463         case LOAD_NORMAL:
                /* Tx queue should only be re-enabled */
6465                 netif_wake_queue(bp->dev);
6466                 bnx2x_set_rx_mode(bp->dev);
6467                 break;
6468
6469         case LOAD_OPEN:
6470                 netif_start_queue(bp->dev);
6471                 bnx2x_set_rx_mode(bp->dev);
6472                 if (bp->flags & USING_MSIX_FLAG)
6473                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6474                                bp->dev->name);
6475                 break;
6476
6477         case LOAD_DIAG:
6478                 bnx2x_set_rx_mode(bp->dev);
6479                 bp->state = BNX2X_STATE_DIAG;
6480                 break;
6481
6482         default:
6483                 break;
6484         }
6485
6486         if (!bp->port.pmf)
6487                 bnx2x__link_status_update(bp);
6488
6489         /* start the timer */
6490         mod_timer(&bp->timer, jiffies + bp->current_interval);
6491
6492
6493         return 0;
6494
6495 load_netif_stop:
6496         bnx2x_napi_disable(bp);
6497 load_rings_free:
6498         /* Free SKBs, SGEs, TPA pool and driver internals */
6499         bnx2x_free_skbs(bp);
6500         for_each_queue(bp, i)
6501                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6502 load_int_disable:
6503         bnx2x_int_disable_sync(bp, 1);
6504         /* Release IRQs */
6505         bnx2x_free_irq(bp);
6506 load_error:
6507         bnx2x_free_mem(bp);
6508         bp->port.pmf = 0;
6509
6510         /* TBD we really need to reset the chip
6511            if we want to recover from this */
6512         return rc;
6513 }
6514
6515 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6516 {
6517         int rc;
6518
6519         /* halt the connection */
6520         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6521         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6522
6523         /* Wait for completion */
6524         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6525                                &(bp->fp[index].state), 1);
6526         if (rc) /* timeout */
6527                 return rc;
6528
6529         /* delete cfc entry */
6530         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6531
6532         /* Wait for completion */
6533         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6534                                &(bp->fp[index].state), 1);
6535         return rc;
6536 }
6537
6538 static int bnx2x_stop_leading(struct bnx2x *bp)
6539 {
6540         u16 dsb_sp_prod_idx;
6541         /* if the other port is handling traffic,
6542            this can take a lot of time */
6543         int cnt = 500;
6544         int rc;
6545
6546         might_sleep();
6547
6548         /* Send HALT ramrod */
6549         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6550         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6551
6552         /* Wait for completion */
6553         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6554                                &(bp->fp[0].state), 1);
6555         if (rc) /* timeout */
6556                 return rc;
6557
6558         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6559
6560         /* Send PORT_DELETE ramrod */
6561         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6562
        /* Wait for the completion to arrive on the default status block.
           We are going to reset the chip anyway, so there is not much
           to do if this times out.
         */
6567         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6568                 if (!cnt) {
6569                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6570                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6571                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6572 #ifdef BNX2X_STOP_ON_ERROR
6573                         bnx2x_panic();
6574 #else
6575                         rc = -EBUSY;
6576 #endif
6577                         break;
6578                 }
6579                 cnt--;
6580                 msleep(1);
6581         }
6582         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6583         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6584
6585         return rc;
6586 }
6587
6588 static void bnx2x_reset_func(struct bnx2x *bp)
6589 {
6590         int port = BP_PORT(bp);
6591         int func = BP_FUNC(bp);
6592         int base, i;
6593
6594         /* Configure IGU */
6595         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6596         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6597
6598         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6599
6600         /* Clear ILT */
6601         base = FUNC_ILT_BASE(func);
6602         for (i = base; i < base + ILT_PER_FUNC; i++)
6603                 bnx2x_ilt_wr(bp, i, 0);
6604 }
6605
6606 static void bnx2x_reset_port(struct bnx2x *bp)
6607 {
6608         int port = BP_PORT(bp);
6609         u32 val;
6610
6611         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6612
        /* Do not receive packets to the BRB */
        REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
        /* Do not direct receive packets that are not for the MCP
           to the BRB */
        REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                           NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6618
6619         /* Configure AEU */
6620         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6621
6622         msleep(100);
6623         /* Check for BRB port occupancy */
6624         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6625         if (val)
6626                 DP(NETIF_MSG_IFDOWN,
6627                    "BRB1 is not empty  %d blocks are occupied\n", val);
6628
6629         /* TODO: Close Doorbell port? */
6630 }
6631
6632 static void bnx2x_reset_common(struct bnx2x *bp)
6633 {
6634         /* reset_common */
6635         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6636                0xd3ffff7f);
6637         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6638 }
6639
6640 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6641 {
6642         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6643            BP_FUNC(bp), reset_code);
6644
6645         switch (reset_code) {
6646         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6647                 bnx2x_reset_port(bp);
6648                 bnx2x_reset_func(bp);
6649                 bnx2x_reset_common(bp);
6650                 break;
6651
6652         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6653                 bnx2x_reset_port(bp);
6654                 bnx2x_reset_func(bp);
6655                 break;
6656
6657         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6658                 bnx2x_reset_func(bp);
6659                 break;
6660
6661         default:
6662                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6663                 break;
6664         }
6665 }
6666
6667 /* must be called with rtnl_lock */
6668 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6669 {
6670         int port = BP_PORT(bp);
6671         u32 reset_code = 0;
6672         int i, cnt, rc;
6673
6674         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6675
6676         bp->rx_mode = BNX2X_RX_MODE_NONE;
6677         bnx2x_set_storm_rx_mode(bp);
6678
6679         bnx2x_netif_stop(bp, 1);
6680         if (!netif_running(bp->dev))
6681                 bnx2x_napi_disable(bp);
6682         del_timer_sync(&bp->timer);
6683         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6684                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6685         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6686
        /* Wait until the tx fastpath tasks complete */
6688         for_each_queue(bp, i) {
6689                 struct bnx2x_fastpath *fp = &bp->fp[i];
6690
6691                 cnt = 1000;
6692                 smp_rmb();
6693                 while (BNX2X_HAS_TX_WORK(fp)) {
6694
6695                         bnx2x_tx_int(fp, 1000);
6696                         if (!cnt) {
6697                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6698                                           i);
6699 #ifdef BNX2X_STOP_ON_ERROR
6700                                 bnx2x_panic();
6701                                 return -EBUSY;
6702 #else
6703                                 break;
6704 #endif
6705                         }
6706                         cnt--;
6707                         msleep(1);
6708                         smp_rmb();
6709                 }
6710         }
6711         /* Give HW time to discard old tx messages */
6712         msleep(1);
6713
6714         /* Release IRQs */
6715         bnx2x_free_irq(bp);
6716
6717         if (CHIP_IS_E1(bp)) {
6718                 struct mac_configuration_cmd *config =
6719                                                 bnx2x_sp(bp, mcast_config);
6720
6721                 bnx2x_set_mac_addr_e1(bp, 0);
6722
6723                 for (i = 0; i < config->hdr.length_6b; i++)
6724                         CAM_INVALIDATE(config->config_table[i]);
6725
6726                 config->hdr.length_6b = i;
6727                 if (CHIP_REV_IS_SLOW(bp))
6728                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6729                 else
6730                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6731                 config->hdr.client_id = BP_CL_ID(bp);
6732                 config->hdr.reserved1 = 0;
6733
6734                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6735                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6736                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6737
6738         } else { /* E1H */
6739                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6740
6741                 bnx2x_set_mac_addr_e1h(bp, 0);
6742
6743                 for (i = 0; i < MC_HASH_SIZE; i++)
6744                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6745         }
6746
6747         if (unload_mode == UNLOAD_NORMAL)
6748                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6749
6750         else if (bp->flags & NO_WOL_FLAG) {
6751                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6752                 if (CHIP_IS_E1H(bp))
6753                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6754
6755         } else if (bp->wol) {
6756                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6757                 u8 *mac_addr = bp->dev->dev_addr;
6758                 u32 val;
6759                 /* The mac address is written to entries 1-4 to
6760                    preserve entry 0 which is used by the PMF */
6761                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6762
6763                 val = (mac_addr[0] << 8) | mac_addr[1];
6764                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6765
6766                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6767                       (mac_addr[4] << 8) | mac_addr[5];
6768                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6769
6770                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6771
6772         } else
6773                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6774
6775         /* Close multi and leading connections
6776            Completions for ramrods are collected in a synchronous way */
6777         for_each_nondefault_queue(bp, i)
6778                 if (bnx2x_stop_multi(bp, i))
6779                         goto unload_error;
6780
6781         rc = bnx2x_stop_leading(bp);
6782         if (rc) {
6783                 BNX2X_ERR("Stop leading failed!\n");
6784 #ifdef BNX2X_STOP_ON_ERROR
6785                 return -EBUSY;
6786 #else
6787                 goto unload_error;
6788 #endif
6789         }
6790
6791 unload_error:
6792         if (!BP_NOMCP(bp))
6793                 reset_code = bnx2x_fw_command(bp, reset_code);
6794         else {
6795                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6796                    load_count[0], load_count[1], load_count[2]);
6797                 load_count[0]--;
6798                 load_count[1 + port]--;
6799                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6800                    load_count[0], load_count[1], load_count[2]);
6801                 if (load_count[0] == 0)
6802                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6803                 else if (load_count[1 + port] == 0)
6804                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6805                 else
6806                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6807         }
6808
6809         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6810             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6811                 bnx2x__link_reset(bp);
6812
6813         /* Reset the chip */
6814         bnx2x_reset_chip(bp, reset_code);
6815
6816         /* Report UNLOAD_DONE to MCP */
6817         if (!BP_NOMCP(bp))
6818                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6819         bp->port.pmf = 0;
6820
6821         /* Free SKBs, SGEs, TPA pool and driver internals */
6822         bnx2x_free_skbs(bp);
6823         for_each_queue(bp, i)
6824                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6825         bnx2x_free_mem(bp);
6826
6827         bp->state = BNX2X_STATE_CLOSED;
6828
6829         netif_carrier_off(bp->dev);
6830
6831         return 0;
6832 }
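/* Editor's note: the tx-drain loop in bnx2x_nic_unload() above is a
 * bounded-poll pattern: re-check the condition, sleep 1 ms, give up
 * after a fixed budget.  A minimal generic sketch of that pattern;
 * illustrative only, not part of the driver (done() and its argument
 * are hypothetical):
 */
static int poll_until_done(int (*done)(void *), void *arg, int budget_ms)
{
	while (!done(arg)) {
		if (budget_ms-- <= 0)
			return -EBUSY;	/* caller decides how hard to fail */
		msleep(1);
	}
	return 0;
}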
6833
6834 static void bnx2x_reset_task(struct work_struct *work)
6835 {
6836         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6837
6838 #ifdef BNX2X_STOP_ON_ERROR
6839         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6840                   " so reset not done to allow debug dump,\n"
6841          KERN_ERR " you will need to reboot when done\n");
6842         return;
6843 #endif
6844
6845         rtnl_lock();
6846
6847         if (!netif_running(bp->dev))
6848                 goto reset_task_exit;
6849
6850         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6851         bnx2x_nic_load(bp, LOAD_NORMAL);
6852
6853 reset_task_exit:
6854         rtnl_unlock();
6855 }
6856
6857 /* end of nic load/unload */
6858
6859 /* ethtool_ops */
6860
6861 /*
6862  * Init service functions
6863  */
6864
6865 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6866 {
6867         u32 val;
6868
6869         /* Check if there is any driver already loaded */
6870         val = REG_RD(bp, MISC_REG_UNPREPARED);
6871         if (val == 0x1) {
6872                 /* Check if it is the UNDI driver
6873                  * UNDI driver initializes CID offset for normal doorbell to 0x7
6874                  */
6875                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6876                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6877                 if (val == 0x7)
6878                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6879                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6880
6881                 if (val == 0x7) {
6882                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6883                         /* save our func */
6884                         int func = BP_FUNC(bp);
6885                         u32 swap_en;
6886                         u32 swap_val;
6887
6888                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6889
6890                         /* try unload UNDI on port 0 */
6891                         bp->func = 0;
6892                         bp->fw_seq =
6893                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6894                                 DRV_MSG_SEQ_NUMBER_MASK);
6895                         reset_code = bnx2x_fw_command(bp, reset_code);
6896
6897                         /* if UNDI is loaded on the other port */
6898                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6899
6900                                 /* send "DONE" for previous unload */
6901                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6902
6903                                 /* unload UNDI on port 1 */
6904                                 bp->func = 1;
6905                                 bp->fw_seq =
6906                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6907                                         DRV_MSG_SEQ_NUMBER_MASK);
6908                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6909
6910                                 bnx2x_fw_command(bp, reset_code);
6911                         }
6912
6913                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6914                                     HC_REG_CONFIG_0), 0x1000);
6915
6916                         /* close input traffic and wait for it */
6917                         /* Do not rcv packets to BRB */
6918                         REG_WR(bp,
6919                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6920                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6921                         /* Do not direct rcv packets that are not for MCP to
6922                          * the BRB */
6923                         REG_WR(bp,
6924                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6925                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6926                         /* clear AEU */
6927                         REG_WR(bp,
6928                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6929                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6930                         msleep(10);
6931
6932                         /* save NIG port swap info */
6933                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6934                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6935                         /* reset device */
6936                         REG_WR(bp,
6937                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6938                                0xd3ffffff);
6939                         REG_WR(bp,
6940                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6941                                0x1403);
6942                         /* take the NIG out of reset and restore swap values */
6943                         REG_WR(bp,
6944                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6945                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6946                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6947                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6948
6949                         /* send unload done to the MCP */
6950                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6951
6952                         /* restore our func and fw_seq */
6953                         bp->func = func;
6954                         bp->fw_seq =
6955                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6956                                 DRV_MSG_SEQ_NUMBER_MASK);
6957                 }
6958         }
6959 }
6960
6961 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6962 {
6963         u32 val, val2, val3, val4, id;
6964         u16 pmc;
6965
6966         /* Get the chip revision id and number. */
6967         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6968         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6969         id = ((val & 0xffff) << 16);
6970         val = REG_RD(bp, MISC_REG_CHIP_REV);
6971         id |= ((val & 0xf) << 12);
6972         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6973         id |= ((val & 0xff) << 4);
6974         val = REG_RD(bp, MISC_REG_BOND_ID);
6975         id |= (val & 0xf);
6976         bp->common.chip_id = id;
6977         bp->link_params.chip_id = bp->common.chip_id;
6978         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6979
6980         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6981         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6982                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6983         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6984                        bp->common.flash_size, bp->common.flash_size);
6985
6986         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6987         bp->link_params.shmem_base = bp->common.shmem_base;
6988         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6989
6990         if (!bp->common.shmem_base ||
6991             (bp->common.shmem_base < 0xA0000) ||
6992             (bp->common.shmem_base >= 0xC0000)) {
6993                 BNX2X_DEV_INFO("MCP not active\n");
6994                 bp->flags |= NO_MCP_FLAG;
6995                 return;
6996         }
6997
6998         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6999         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7000                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7001                 BNX2X_ERR("BAD MCP validity signature\n");
7002
7003         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7004         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7005
7006         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7007                        bp->common.hw_config, bp->common.board);
7008
7009         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7010                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7011                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7012
7013         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7014         bp->common.bc_ver = val;
7015         BNX2X_DEV_INFO("bc_ver %X\n", val);
7016         if (val < BNX2X_BC_VER) {
7017                 /* for now only warn;
7018                  * later we might need to enforce this */
7019                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7020                           " please upgrade BC\n", BNX2X_BC_VER, val);
7021         }
7022
7023         if (BP_E1HVN(bp) == 0) {
7024                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7025                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7026         } else {
7027                 /* no WOL capability for E1HVN != 0 */
7028                 bp->flags |= NO_WOL_FLAG;
7029         }
7030         BNX2X_DEV_INFO("%sWoL capable\n",
7031                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7032
7033         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7034         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7035         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7036         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7037
7038         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7039                val, val2, val3, val4);
7040 }
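/* Editor's note: a compact restatement of the chip-id layout assembled
 * in bnx2x_get_common_hwinfo() above: chip num in bits 16-31, rev in
 * 12-15, metal in 4-11, bond id in 0-3.  Illustrative helper only; the
 * raw register values are passed in instead of being read from the chip:
 */
static u32 pack_chip_id(u32 num, u32 rev, u32 metal, u32 bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}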
7041
7042 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7043                                                     u32 switch_cfg)
7044 {
7045         int port = BP_PORT(bp);
7046         u32 ext_phy_type;
7047
7048         switch (switch_cfg) {
7049         case SWITCH_CFG_1G:
7050                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7051
7052                 ext_phy_type =
7053                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7054                 switch (ext_phy_type) {
7055                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7056                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7057                                        ext_phy_type);
7058
7059                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7060                                                SUPPORTED_10baseT_Full |
7061                                                SUPPORTED_100baseT_Half |
7062                                                SUPPORTED_100baseT_Full |
7063                                                SUPPORTED_1000baseT_Full |
7064                                                SUPPORTED_2500baseX_Full |
7065                                                SUPPORTED_TP |
7066                                                SUPPORTED_FIBRE |
7067                                                SUPPORTED_Autoneg |
7068                                                SUPPORTED_Pause |
7069                                                SUPPORTED_Asym_Pause);
7070                         break;
7071
7072                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7073                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7074                                        ext_phy_type);
7075
7076                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7077                                                SUPPORTED_10baseT_Full |
7078                                                SUPPORTED_100baseT_Half |
7079                                                SUPPORTED_100baseT_Full |
7080                                                SUPPORTED_1000baseT_Full |
7081                                                SUPPORTED_TP |
7082                                                SUPPORTED_FIBRE |
7083                                                SUPPORTED_Autoneg |
7084                                                SUPPORTED_Pause |
7085                                                SUPPORTED_Asym_Pause);
7086                         break;
7087
7088                 default:
7089                         BNX2X_ERR("NVRAM config error. "
7090                                   "BAD SerDes ext_phy_config 0x%x\n",
7091                                   bp->link_params.ext_phy_config);
7092                         return;
7093                 }
7094
7095                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7096                                            port*0x10);
7097                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7098                 break;
7099
7100         case SWITCH_CFG_10G:
7101                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7102
7103                 ext_phy_type =
7104                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7105                 switch (ext_phy_type) {
7106                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7107                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7108                                        ext_phy_type);
7109
7110                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7111                                                SUPPORTED_10baseT_Full |
7112                                                SUPPORTED_100baseT_Half |
7113                                                SUPPORTED_100baseT_Full |
7114                                                SUPPORTED_1000baseT_Full |
7115                                                SUPPORTED_2500baseX_Full |
7116                                                SUPPORTED_10000baseT_Full |
7117                                                SUPPORTED_TP |
7118                                                SUPPORTED_FIBRE |
7119                                                SUPPORTED_Autoneg |
7120                                                SUPPORTED_Pause |
7121                                                SUPPORTED_Asym_Pause);
7122                         break;
7123
7124                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7125                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7126                                        ext_phy_type);
7127
7128                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7129                                                SUPPORTED_FIBRE |
7130                                                SUPPORTED_Pause |
7131                                                SUPPORTED_Asym_Pause);
7132                         break;
7133
7134                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7135                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7136                                        ext_phy_type);
7137
7138                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7139                                                SUPPORTED_1000baseT_Full |
7140                                                SUPPORTED_FIBRE |
7141                                                SUPPORTED_Pause |
7142                                                SUPPORTED_Asym_Pause);
7143                         break;
7144
7145                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7146                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7147                                        ext_phy_type);
7148
7149                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7150                                                SUPPORTED_1000baseT_Full |
7151                                                SUPPORTED_FIBRE |
7152                                                SUPPORTED_Autoneg |
7153                                                SUPPORTED_Pause |
7154                                                SUPPORTED_Asym_Pause);
7155                         break;
7156
7157                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7158                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7159                                        ext_phy_type);
7160
7161                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7162                                                SUPPORTED_2500baseX_Full |
7163                                                SUPPORTED_1000baseT_Full |
7164                                                SUPPORTED_FIBRE |
7165                                                SUPPORTED_Autoneg |
7166                                                SUPPORTED_Pause |
7167                                                SUPPORTED_Asym_Pause);
7168                         break;
7169
7170                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7171                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7172                                        ext_phy_type);
7173
7174                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7175                                                SUPPORTED_TP |
7176                                                SUPPORTED_Autoneg |
7177                                                SUPPORTED_Pause |
7178                                                SUPPORTED_Asym_Pause);
7179                         break;
7180
7181                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7182                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7183                                   bp->link_params.ext_phy_config);
7184                         break;
7185
7186                 default:
7187                         BNX2X_ERR("NVRAM config error. "
7188                                   "BAD XGXS ext_phy_config 0x%x\n",
7189                                   bp->link_params.ext_phy_config);
7190                         return;
7191                 }
7192
7193                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7194                                            port*0x18);
7195                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7196
7197                 break;
7198
7199         default:
7200                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7201                           bp->port.link_config);
7202                 return;
7203         }
7204         bp->link_params.phy_addr = bp->port.phy_addr;
7205
7206         /* mask what we support according to speed_cap_mask */
7207         if (!(bp->link_params.speed_cap_mask &
7208                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7209                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7210
7211         if (!(bp->link_params.speed_cap_mask &
7212                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7213                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7214
7215         if (!(bp->link_params.speed_cap_mask &
7216                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7217                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7218
7219         if (!(bp->link_params.speed_cap_mask &
7220                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7221                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7222
7223         if (!(bp->link_params.speed_cap_mask &
7224                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7225                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7226                                         SUPPORTED_1000baseT_Full);
7227
7228         if (!(bp->link_params.speed_cap_mask &
7229                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7230                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7231
7232         if (!(bp->link_params.speed_cap_mask &
7233                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7234                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7235
7236         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7237 }
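/* Editor's note: the masking above can be read as a table: each
 * PORT_HW_CFG_SPEED_CAPABILITY_D0_* bit that is absent from
 * speed_cap_mask clears the corresponding SUPPORTED_* bit(s).  A
 * table-driven sketch of the same filtering, illustrative only and
 * deliberately showing just three of the rows:
 */
static void mask_unsupported_speeds(struct bnx2x *bp)
{
	static const struct {
		u32 cap_bit;	/* NVRAM capability bit */
		u32 sup_bits;	/* ethtool SUPPORTED_* bits it gates */
	} map[] = {
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF,
		  SUPPORTED_10baseT_Half },
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G,
		  SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full },
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G,
		  SUPPORTED_10000baseT_Full },
		/* ...remaining rows follow the same shape... */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (!(bp->link_params.speed_cap_mask & map[i].cap_bit))
			bp->port.supported &= ~map[i].sup_bits;
}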
7238
7239 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7240 {
7241         bp->link_params.req_duplex = DUPLEX_FULL;
7242
7243         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7244         case PORT_FEATURE_LINK_SPEED_AUTO:
7245                 if (bp->port.supported & SUPPORTED_Autoneg) {
7246                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7247                         bp->port.advertising = bp->port.supported;
7248                 } else {
7249                         u32 ext_phy_type =
7250                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7251
7252                         if ((ext_phy_type ==
7253                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7254                             (ext_phy_type ==
7255                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7256                                 /* force 10G, no AN */
7257                                 bp->link_params.req_line_speed = SPEED_10000;
7258                                 bp->port.advertising =
7259                                                 (ADVERTISED_10000baseT_Full |
7260                                                  ADVERTISED_FIBRE);
7261                                 break;
7262                         }
7263                         BNX2X_ERR("NVRAM config error. "
7264                                   "Invalid link_config 0x%x"
7265                                   "  Autoneg not supported\n",
7266                                   bp->port.link_config);
7267                         return;
7268                 }
7269                 break;
7270
7271         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7272                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7273                         bp->link_params.req_line_speed = SPEED_10;
7274                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7275                                                 ADVERTISED_TP);
7276                 } else {
7277                         BNX2X_ERR("NVRAM config error. "
7278                                   "Invalid link_config 0x%x"
7279                                   "  speed_cap_mask 0x%x\n",
7280                                   bp->port.link_config,
7281                                   bp->link_params.speed_cap_mask);
7282                         return;
7283                 }
7284                 break;
7285
7286         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7287                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7288                         bp->link_params.req_line_speed = SPEED_10;
7289                         bp->link_params.req_duplex = DUPLEX_HALF;
7290                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7291                                                 ADVERTISED_TP);
7292                 } else {
7293                         BNX2X_ERR("NVRAM config error. "
7294                                   "Invalid link_config 0x%x"
7295                                   "  speed_cap_mask 0x%x\n",
7296                                   bp->port.link_config,
7297                                   bp->link_params.speed_cap_mask);
7298                         return;
7299                 }
7300                 break;
7301
7302         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7303                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7304                         bp->link_params.req_line_speed = SPEED_100;
7305                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7306                                                 ADVERTISED_TP);
7307                 } else {
7308                         BNX2X_ERR("NVRAM config error. "
7309                                   "Invalid link_config 0x%x"
7310                                   "  speed_cap_mask 0x%x\n",
7311                                   bp->port.link_config,
7312                                   bp->link_params.speed_cap_mask);
7313                         return;
7314                 }
7315                 break;
7316
7317         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7318                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7319                         bp->link_params.req_line_speed = SPEED_100;
7320                         bp->link_params.req_duplex = DUPLEX_HALF;
7321                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7322                                                 ADVERTISED_TP);
7323                 } else {
7324                         BNX2X_ERR("NVRAM config error. "
7325                                   "Invalid link_config 0x%x"
7326                                   "  speed_cap_mask 0x%x\n",
7327                                   bp->port.link_config,
7328                                   bp->link_params.speed_cap_mask);
7329                         return;
7330                 }
7331                 break;
7332
7333         case PORT_FEATURE_LINK_SPEED_1G:
7334                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7335                         bp->link_params.req_line_speed = SPEED_1000;
7336                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7337                                                 ADVERTISED_TP);
7338                 } else {
7339                         BNX2X_ERR("NVRAM config error. "
7340                                   "Invalid link_config 0x%x"
7341                                   "  speed_cap_mask 0x%x\n",
7342                                   bp->port.link_config,
7343                                   bp->link_params.speed_cap_mask);
7344                         return;
7345                 }
7346                 break;
7347
7348         case PORT_FEATURE_LINK_SPEED_2_5G:
7349                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7350                         bp->link_params.req_line_speed = SPEED_2500;
7351                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7352                                                 ADVERTISED_TP);
7353                 } else {
7354                         BNX2X_ERR("NVRAM config error. "
7355                                   "Invalid link_config 0x%x"
7356                                   "  speed_cap_mask 0x%x\n",
7357                                   bp->port.link_config,
7358                                   bp->link_params.speed_cap_mask);
7359                         return;
7360                 }
7361                 break;
7362
7363         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7364         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7365         case PORT_FEATURE_LINK_SPEED_10G_KR:
7366                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7367                         bp->link_params.req_line_speed = SPEED_10000;
7368                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7369                                                 ADVERTISED_FIBRE);
7370                 } else {
7371                         BNX2X_ERR("NVRAM config error. "
7372                                   "Invalid link_config 0x%x"
7373                                   "  speed_cap_mask 0x%x\n",
7374                                   bp->port.link_config,
7375                                   bp->link_params.speed_cap_mask);
7376                         return;
7377                 }
7378                 break;
7379
7380         default:
7381                 BNX2X_ERR("NVRAM config error. "
7382                           "BAD link speed link_config 0x%x\n",
7383                           bp->port.link_config);
7384                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7385                 bp->port.advertising = bp->port.supported;
7386                 break;
7387         }
7388
7389         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7390                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7391         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7392             !(bp->port.supported & SUPPORTED_Autoneg))
7393                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7394
7395         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7396                        "  advertising 0x%x\n",
7397                        bp->link_params.req_line_speed,
7398                        bp->link_params.req_duplex,
7399                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7400 }
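/* Editor's note: every arm of the switch in
 * bnx2x_link_settings_requested() above has the same shape: extract the
 * NVRAM speed selector, verify the matching SUPPORTED_* bit, then fill
 * req_line_speed/req_duplex/advertising (falling back to autoneg in the
 * default arm).  The selector extraction in isolation, illustrative
 * only:
 */
static u32 requested_speed_field(struct bnx2x *bp)
{
	return bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK;
}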
7401
7402 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7403 {
7404         int port = BP_PORT(bp);
7405         u32 val, val2;
7406
7407         bp->link_params.bp = bp;
7408         bp->link_params.port = port;
7409
7410         bp->link_params.serdes_config =
7411                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7412         bp->link_params.lane_config =
7413                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7414         bp->link_params.ext_phy_config =
7415                 SHMEM_RD(bp,
7416                          dev_info.port_hw_config[port].external_phy_config);
7417         bp->link_params.speed_cap_mask =
7418                 SHMEM_RD(bp,
7419                          dev_info.port_hw_config[port].speed_capability_mask);
7420
7421         bp->port.link_config =
7422                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7423
7424         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7425              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7426                        "  link_config 0x%08x\n",
7427                        bp->link_params.serdes_config,
7428                        bp->link_params.lane_config,
7429                        bp->link_params.ext_phy_config,
7430                        bp->link_params.speed_cap_mask, bp->port.link_config);
7431
7432         bp->link_params.switch_cfg = (bp->port.link_config &
7433                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7434         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7435
7436         bnx2x_link_settings_requested(bp);
7437
7438         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7439         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7440         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7441         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7442         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7443         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7444         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7445         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7446         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7447         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7448 }
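/* Editor's note: the MAC assembly above (repeated for the E1HMF case in
 * bnx2x_get_hwinfo() below) unpacks two shmem words: mac_upper carries
 * bytes 0-1 in its low 16 bits, mac_lower carries bytes 2-5.  An
 * equivalent stand-alone sketch, illustrative only:
 */
static void mac_from_shmem_words(u8 *addr, u32 upper, u32 lower)
{
	addr[0] = (upper >> 8) & 0xff;
	addr[1] = upper & 0xff;
	addr[2] = (lower >> 24) & 0xff;
	addr[3] = (lower >> 16) & 0xff;
	addr[4] = (lower >> 8) & 0xff;
	addr[5] = lower & 0xff;
}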
7449
7450 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7451 {
7452         int func = BP_FUNC(bp);
7453         u32 val, val2;
7454         int rc = 0;
7455
7456         bnx2x_get_common_hwinfo(bp);
7457
7458         bp->e1hov = 0;
7459         bp->e1hmf = 0;
7460         if (CHIP_IS_E1H(bp)) {
7461                 bp->mf_config =
7462                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7463
7464                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7465                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7466                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7467
7468                         bp->e1hov = val;
7469                         bp->e1hmf = 1;
7470                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7471                                        "(0x%04x)\n",
7472                                        func, bp->e1hov, bp->e1hov);
7473                 } else {
7474                         BNX2X_DEV_INFO("Single function mode\n");
7475                         if (BP_E1HVN(bp)) {
7476                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7477                                           "  aborting\n", func);
7478                                 rc = -EPERM;
7479                         }
7480                 }
7481         }
7482
7483         if (!BP_NOMCP(bp)) {
7484                 bnx2x_get_port_hwinfo(bp);
7485
7486                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7487                               DRV_MSG_SEQ_NUMBER_MASK);
7488                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7489         }
7490
7491         if (IS_E1HMF(bp)) {
7492                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7493                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7494                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7495                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7496                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7497                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7498                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7499                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7500                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7501                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7502                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7503                                ETH_ALEN);
7504                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7505                                ETH_ALEN);
7506                 }
7507
7508                 return rc;
7509         }
7510
7511         if (BP_NOMCP(bp)) {
7512                 /* only supposed to happen on emulation/FPGA */
7513                 BNX2X_ERR("warning: random MAC workaround active\n");
7514                 random_ether_addr(bp->dev->dev_addr);
7515                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7516         }
7517
7518         return rc;
7519 }
7520
7521 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7522 {
7523         int func = BP_FUNC(bp);
7524         int rc;
7525
7526         /* Disable interrupt handling until HW is initialized */
7527         atomic_set(&bp->intr_sem, 1);
7528
7529         mutex_init(&bp->port.phy_mutex);
7530
7531         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7532         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7533
7534         rc = bnx2x_get_hwinfo(bp);
7535
7536         /* need to reset chip if undi was active */
7537         if (!BP_NOMCP(bp))
7538                 bnx2x_undi_unload(bp);
7539
7540         if (CHIP_REV_IS_FPGA(bp))
7541                 printk(KERN_ERR PFX "FPGA detected\n");
7542
7543         if (BP_NOMCP(bp) && (func == 0))
7544                 printk(KERN_ERR PFX
7545                        "MCP disabled, must load devices in order!\n");
7546
7547         /* Set TPA flags */
7548         if (disable_tpa) {
7549                 bp->flags &= ~TPA_ENABLE_FLAG;
7550                 bp->dev->features &= ~NETIF_F_LRO;
7551         } else {
7552                 bp->flags |= TPA_ENABLE_FLAG;
7553                 bp->dev->features |= NETIF_F_LRO;
7554         }
7555
7556
7557         bp->tx_ring_size = MAX_TX_AVAIL;
7558         bp->rx_ring_size = MAX_RX_AVAIL;
7559
7560         bp->rx_csum = 1;
7561         bp->rx_offset = 0;
7562
7563         bp->tx_ticks = 50;
7564         bp->rx_ticks = 25;
7565
7566         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7567         bp->current_interval = (poll ? poll : bp->timer_interval);
7568
7569         init_timer(&bp->timer);
7570         bp->timer.expires = jiffies + bp->current_interval;
7571         bp->timer.data = (unsigned long) bp;
7572         bp->timer.function = bnx2x_timer;
7573
7574         return rc;
7575 }
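/* Editor's note: bnx2x_init_bp() above programs the (old-style)
 * init_timer() interface with an interval in jiffies: HZ jiffies is one
 * second (5*HZ on slow emulation/FPGA chips), and a non-zero "poll"
 * module parameter overrides it with a raw jiffies count.  The interval
 * selection in isolation, illustrative only:
 */
static unsigned long pick_timer_interval(int poll_param, int slow_chip)
{
	unsigned long base = slow_chip ? 5*HZ : HZ;

	return poll_param ? (unsigned long)poll_param : base;
}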
7576
7577 /*
7578  * ethtool service functions
7579  */
7580
7581 /* All ethtool functions called with rtnl_lock */
7582
7583 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7584 {
7585         struct bnx2x *bp = netdev_priv(dev);
7586
7587         cmd->supported = bp->port.supported;
7588         cmd->advertising = bp->port.advertising;
7589
7590         if (netif_carrier_ok(dev)) {
7591                 cmd->speed = bp->link_vars.line_speed;
7592                 cmd->duplex = bp->link_vars.duplex;
7593         } else {
7594                 cmd->speed = bp->link_params.req_line_speed;
7595                 cmd->duplex = bp->link_params.req_duplex;
7596         }
7597         if (IS_E1HMF(bp)) {
7598                 u16 vn_max_rate;
7599
7600                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7601                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7602                 if (vn_max_rate < cmd->speed)
7603                         cmd->speed = vn_max_rate;
7604         }
7605
7606         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7607                 u32 ext_phy_type =
7608                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7609
7610                 switch (ext_phy_type) {
7611                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7612                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7613                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7614                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7615                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7616                         cmd->port = PORT_FIBRE;
7617                         break;
7618
7619                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7620                         cmd->port = PORT_TP;
7621                         break;
7622
7623                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7624                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7625                                   bp->link_params.ext_phy_config);
7626                         break;
7627
7628                 default:
7629                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7630                            bp->link_params.ext_phy_config);
7631                         break;
7632                 }
7633         } else
7634                 cmd->port = PORT_TP;
7635
7636         cmd->phy_address = bp->port.phy_addr;
7637         cmd->transceiver = XCVR_INTERNAL;
7638
7639         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7640                 cmd->autoneg = AUTONEG_ENABLE;
7641         else
7642                 cmd->autoneg = AUTONEG_DISABLE;
7643
7644         cmd->maxtxpkt = 0;
7645         cmd->maxrxpkt = 0;
7646
7647         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7648            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7649            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7650            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7651            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7652            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7653            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7654
7655         return 0;
7656 }
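/* Editor's note: in E1H multi-function mode bnx2x_get_settings() above
 * caps the reported speed with the per-function bandwidth from
 * mf_config; the MAX_BW field counts in units of 100 Mbps, so a field
 * value of 25 caps an otherwise 10000 Mbps link at 2500.  The same
 * arithmetic in isolation, illustrative only:
 */
static u16 vn_max_rate_mbps(u32 mf_config)
{
	return ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
		FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
}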
7657
7658 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7659 {
7660         struct bnx2x *bp = netdev_priv(dev);
7661         u32 advertising;
7662
7663         if (IS_E1HMF(bp))
7664                 return 0;
7665
7666         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7667            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7668            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7669            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7670            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7671            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7672            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7673
7674         if (cmd->autoneg == AUTONEG_ENABLE) {
7675                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7676                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7677                         return -EINVAL;
7678                 }
7679
7680                 /* advertise the requested speed and duplex if supported */
7681                 cmd->advertising &= bp->port.supported;
7682
7683                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7684                 bp->link_params.req_duplex = DUPLEX_FULL;
7685                 bp->port.advertising |= (ADVERTISED_Autoneg |
7686                                          cmd->advertising);
7687
7688         } else { /* forced speed */
7689                 /* advertise the requested speed and duplex if supported */
7690                 switch (cmd->speed) {
7691                 case SPEED_10:
7692                         if (cmd->duplex == DUPLEX_FULL) {
7693                                 if (!(bp->port.supported &
7694                                       SUPPORTED_10baseT_Full)) {
7695                                         DP(NETIF_MSG_LINK,
7696                                            "10M full not supported\n");
7697                                         return -EINVAL;
7698                                 }
7699
7700                                 advertising = (ADVERTISED_10baseT_Full |
7701                                                ADVERTISED_TP);
7702                         } else {
7703                                 if (!(bp->port.supported &
7704                                       SUPPORTED_10baseT_Half)) {
7705                                         DP(NETIF_MSG_LINK,
7706                                            "10M half not supported\n");
7707                                         return -EINVAL;
7708                                 }
7709
7710                                 advertising = (ADVERTISED_10baseT_Half |
7711                                                ADVERTISED_TP);
7712                         }
7713                         break;
7714
7715                 case SPEED_100:
7716                         if (cmd->duplex == DUPLEX_FULL) {
7717                                 if (!(bp->port.supported &
7718                                                 SUPPORTED_100baseT_Full)) {
7719                                         DP(NETIF_MSG_LINK,
7720                                            "100M full not supported\n");
7721                                         return -EINVAL;
7722                                 }
7723
7724                                 advertising = (ADVERTISED_100baseT_Full |
7725                                                ADVERTISED_TP);
7726                         } else {
7727                                 if (!(bp->port.supported &
7728                                                 SUPPORTED_100baseT_Half)) {
7729                                         DP(NETIF_MSG_LINK,
7730                                            "100M half not supported\n");
7731                                         return -EINVAL;
7732                                 }
7733
7734                                 advertising = (ADVERTISED_100baseT_Half |
7735                                                ADVERTISED_TP);
7736                         }
7737                         break;
7738
7739                 case SPEED_1000:
7740                         if (cmd->duplex != DUPLEX_FULL) {
7741                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7742                                 return -EINVAL;
7743                         }
7744
7745                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7746                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7747                                 return -EINVAL;
7748                         }
7749
7750                         advertising = (ADVERTISED_1000baseT_Full |
7751                                        ADVERTISED_TP);
7752                         break;
7753
7754                 case SPEED_2500:
7755                         if (cmd->duplex != DUPLEX_FULL) {
7756                                 DP(NETIF_MSG_LINK,
7757                                    "2.5G half not supported\n");
7758                                 return -EINVAL;
7759                         }
7760
7761                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7762                                 DP(NETIF_MSG_LINK,
7763                                    "2.5G full not supported\n");
7764                                 return -EINVAL;
7765                         }
7766
7767                         advertising = (ADVERTISED_2500baseX_Full |
7768                                        ADVERTISED_TP);
7769                         break;
7770
7771                 case SPEED_10000:
7772                         if (cmd->duplex != DUPLEX_FULL) {
7773                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7774                                 return -EINVAL;
7775                         }
7776
7777                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7778                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7779                                 return -EINVAL;
7780                         }
7781
7782                         advertising = (ADVERTISED_10000baseT_Full |
7783                                        ADVERTISED_FIBRE);
7784                         break;
7785
7786                 default:
7787                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7788                         return -EINVAL;
7789                 }
7790
7791                 bp->link_params.req_line_speed = cmd->speed;
7792                 bp->link_params.req_duplex = cmd->duplex;
7793                 bp->port.advertising = advertising;
7794         }
7795
7796         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7797            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7798            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7799            bp->port.advertising);
7800
7801         if (netif_running(dev)) {
7802                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7803                 bnx2x_link_set(bp);
7804         }
7805
7806         return 0;
7807 }
7808
7809 #define PHY_FW_VER_LEN                  10
7810
7811 static void bnx2x_get_drvinfo(struct net_device *dev,
7812                               struct ethtool_drvinfo *info)
7813 {
7814         struct bnx2x *bp = netdev_priv(dev);
7815         u8 phy_fw_ver[PHY_FW_VER_LEN];
7816
7817         strcpy(info->driver, DRV_MODULE_NAME);
7818         strcpy(info->version, DRV_MODULE_VERSION);
7819
7820         phy_fw_ver[0] = '\0';
7821         if (bp->port.pmf) {
7822                 bnx2x_acquire_phy_lock(bp);
7823                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7824                                              (bp->state != BNX2X_STATE_CLOSED),
7825                                              phy_fw_ver, PHY_FW_VER_LEN);
7826                 bnx2x_release_phy_lock(bp);
7827         }
7828
7829         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7830                  (bp->common.bc_ver & 0xff0000) >> 16,
7831                  (bp->common.bc_ver & 0xff00) >> 8,
7832                  (bp->common.bc_ver & 0xff),
7833                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7834         strcpy(info->bus_info, pci_name(bp->pdev));
7835         info->n_stats = BNX2X_NUM_STATS;
7836         info->testinfo_len = BNX2X_NUM_TESTS;
7837         info->eedump_len = bp->common.flash_size;
7838         info->regdump_len = 0;
7839 }
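/* Editor's note: bc_ver packs the bootcode version one byte per field,
 * so BNX2X_BC_VER 0x040200 formats as "BC:4.2.0" through the snprintf
 * in bnx2x_get_drvinfo() above.  A minimal decode sketch, illustrative
 * only:
 */
static void bc_ver_decode(u32 bc_ver, int *major, int *minor, int *rev)
{
	*major = (bc_ver & 0xff0000) >> 16;	/* 0x04 -> 4 */
	*minor = (bc_ver & 0xff00) >> 8;	/* 0x02 -> 2 */
	*rev   = bc_ver & 0xff;			/* 0x00 -> 0 */
}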
7840
7841 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7842 {
7843         struct bnx2x *bp = netdev_priv(dev);
7844
7845         if (bp->flags & NO_WOL_FLAG) {
7846                 wol->supported = 0;
7847                 wol->wolopts = 0;
7848         } else {
7849                 wol->supported = WAKE_MAGIC;
7850                 if (bp->wol)
7851                         wol->wolopts = WAKE_MAGIC;
7852                 else
7853                         wol->wolopts = 0;
7854         }
7855         memset(&wol->sopass, 0, sizeof(wol->sopass));
7856 }
7857
7858 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7859 {
7860         struct bnx2x *bp = netdev_priv(dev);
7861
7862         if (wol->wolopts & ~WAKE_MAGIC)
7863                 return -EINVAL;
7864
7865         if (wol->wolopts & WAKE_MAGIC) {
7866                 if (bp->flags & NO_WOL_FLAG)
7867                         return -EINVAL;
7868
7869                 bp->wol = 1;
7870         } else
7871                 bp->wol = 0;
7872
7873         return 0;
7874 }
7875
7876 static u32 bnx2x_get_msglevel(struct net_device *dev)
7877 {
7878         struct bnx2x *bp = netdev_priv(dev);
7879
7880         return bp->msglevel;
7881 }
7882
7883 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7884 {
7885         struct bnx2x *bp = netdev_priv(dev);
7886
7887         if (capable(CAP_NET_ADMIN))
7888                 bp->msglevel = level;
7889 }
7890
7891 static int bnx2x_nway_reset(struct net_device *dev)
7892 {
7893         struct bnx2x *bp = netdev_priv(dev);
7894
7895         if (!bp->port.pmf)
7896                 return 0;
7897
7898         if (netif_running(dev)) {
7899                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7900                 bnx2x_link_set(bp);
7901         }
7902
7903         return 0;
7904 }
7905
7906 static int bnx2x_get_eeprom_len(struct net_device *dev)
7907 {
7908         struct bnx2x *bp = netdev_priv(dev);
7909
7910         return bp->common.flash_size;
7911 }
7912
7913 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7914 {
7915         int port = BP_PORT(bp);
7916         int count, i;
7917         u32 val = 0;
7918
7919         /* adjust timeout for emulation/FPGA */
7920         count = NVRAM_TIMEOUT_COUNT;
7921         if (CHIP_REV_IS_SLOW(bp))
7922                 count *= 100;
7923
7924         /* request access to nvram interface */
7925         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7926                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7927
7928         for (i = 0; i < count*10; i++) {
7929                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7930                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7931                         break;
7932
7933                 udelay(5);
7934         }
7935
7936         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7937                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7938                 return -EBUSY;
7939         }
7940
7941         return 0;
7942 }
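/* Editor's note: the arbitration handshake above sets a per-port
 * request bit and then polls the matching grant bit; the total budget
 * is count*10 polls spaced udelay(5) apart, stretched 100x on
 * emulation/FPGA.  The grant test in isolation, illustrative only:
 */
static int nvram_arb_granted(struct bnx2x *bp, int port)
{
	return !!(REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB) &
		  (MCPR_NVM_SW_ARB_ARB_ARB1 << port));
}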
7943
7944 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7945 {
7946         int port = BP_PORT(bp);
7947         int count, i;
7948         u32 val = 0;
7949
7950         /* adjust timeout for emulation/FPGA */
7951         count = NVRAM_TIMEOUT_COUNT;
7952         if (CHIP_REV_IS_SLOW(bp))
7953                 count *= 100;
7954
7955         /* relinquish nvram interface */
7956         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7957                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7958
7959         for (i = 0; i < count*10; i++) {
7960                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7961                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7962                         break;
7963
7964                 udelay(5);
7965         }
7966
7967         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7968                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7969                 return -EBUSY;
7970         }
7971
7972         return 0;
7973 }
7974
7975 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7976 {
7977         u32 val;
7978
7979         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7980
7981         /* enable both bits, even on read */
7982         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7983                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7984                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7985 }
7986
7987 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7988 {
7989         u32 val;
7990
7991         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7992
7993         /* disable both bits, even after read */
7994         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7995                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7996                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7997 }
7998
7999 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8000                                   u32 cmd_flags)
8001 {
8002         int count, i, rc;
8003         u32 val;
8004
8005         /* build the command word */
8006         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8007
8008         /* need to clear DONE bit separately */
8009         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8010
8011         /* address of the NVRAM to read from */
8012         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8013                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8014
8015         /* issue a read command */
8016         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8017
8018         /* adjust timeout for emulation/FPGA */
8019         count = NVRAM_TIMEOUT_COUNT;
8020         if (CHIP_REV_IS_SLOW(bp))
8021                 count *= 100;
8022
8023         /* wait for completion */
8024         *ret_val = 0;
8025         rc = -EBUSY;
8026         for (i = 0; i < count; i++) {
8027                 udelay(5);
8028                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8029
8030                 if (val & MCPR_NVM_COMMAND_DONE) {
8031                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8032                         /* we read nvram data in cpu order
8033                          * but ethtool sees it as an array of bytes;
8034                          * converting to big-endian will do the work */
8035                         val = cpu_to_be32(val);
8036                         *ret_val = val;
8037                         rc = 0;
8038                         break;
8039                 }
8040         }
8041
8042         return rc;
8043 }
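/* A single standalone dword read (sketch, assuming the NVRAM lock and
 * access-enable bits are already held as above, with bp and offset as in
 * the callers below) must carry both FIRST and LAST so the flash sees a
 * complete one-dword burst:
 */
#if 0
        u32 val;
        int rc = bnx2x_nvram_read_dword(bp, offset, &val,
                                        MCPR_NVM_COMMAND_FIRST |
                                        MCPR_NVM_COMMAND_LAST);
#endif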
8044
8045 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8046                             int buf_size)
8047 {
8048         int rc;
8049         u32 cmd_flags;
8050         u32 val;
8051
8052         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8053                 DP(BNX2X_MSG_NVM,
8054                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8055                    offset, buf_size);
8056                 return -EINVAL;
8057         }
8058
8059         if (offset + buf_size > bp->common.flash_size) {
8060                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8061                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8062                    offset, buf_size, bp->common.flash_size);
8063                 return -EINVAL;
8064         }
8065
8066         /* request access to nvram interface */
8067         rc = bnx2x_acquire_nvram_lock(bp);
8068         if (rc)
8069                 return rc;
8070
8071         /* enable access to nvram interface */
8072         bnx2x_enable_nvram_access(bp);
8073
8074         /* read the first word(s) */
8075         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8076         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8077                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8078                 memcpy(ret_buf, &val, 4);
8079
8080                 /* advance to the next dword */
8081                 offset += sizeof(u32);
8082                 ret_buf += sizeof(u32);
8083                 buf_size -= sizeof(u32);
8084                 cmd_flags = 0;
8085         }
8086
8087         if (rc == 0) {
8088                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8089                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8090                 memcpy(ret_buf, &val, 4);
8091         }
8092
8093         /* disable access to nvram interface */
8094         bnx2x_disable_nvram_access(bp);
8095         bnx2x_release_nvram_lock(bp);
8096
8097         return rc;
8098 }
8099
8100 static int bnx2x_get_eeprom(struct net_device *dev,
8101                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8102 {
8103         struct bnx2x *bp = netdev_priv(dev);
8104         int rc;
8105
8106         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8107            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8108            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8109            eeprom->len, eeprom->len);
8110
8111         /* parameters already validated in ethtool_get_eeprom */
8112
8113         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8114
8115         return rc;
8116 }
8117
8118 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8119                                    u32 cmd_flags)
8120 {
8121         int count, i, rc;
8122
8123         /* build the command word */
8124         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8125
8126         /* need to clear DONE bit separately */
8127         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8128
8129         /* write the data */
8130         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8131
8132         /* address of the NVRAM to write to */
8133         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8134                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8135
8136         /* issue the write command */
8137         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8138
8139         /* adjust timeout for emulation/FPGA */
8140         count = NVRAM_TIMEOUT_COUNT;
8141         if (CHIP_REV_IS_SLOW(bp))
8142                 count *= 100;
8143
8144         /* wait for completion */
8145         rc = -EBUSY;
8146         for (i = 0; i < count; i++) {
8147                 udelay(5);
8148                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8149                 if (val & MCPR_NVM_COMMAND_DONE) {
8150                         rc = 0;
8151                         break;
8152                 }
8153         }
8154
8155         return rc;
8156 }
8157
8158 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
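/* e.g. for offset 0x7: align_offset below is 0x4 and BYTE_OFFSET(0x7)
 * is 24, so the read-modify-write replaces bits 31:24 of the
 * (big-endian) dword holding that byte.
 */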
8159
8160 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8161                               int buf_size)
8162 {
8163         int rc;
8164         u32 cmd_flags;
8165         u32 align_offset;
8166         u32 val;
8167
8168         if (offset + buf_size > bp->common.flash_size) {
8169                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8170                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8171                    offset, buf_size, bp->common.flash_size);
8172                 return -EINVAL;
8173         }
8174
8175         /* request access to nvram interface */
8176         rc = bnx2x_acquire_nvram_lock(bp);
8177         if (rc)
8178                 return rc;
8179
8180         /* enable access to nvram interface */
8181         bnx2x_enable_nvram_access(bp);
8182
8183         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8184         align_offset = (offset & ~0x03);
8185         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8186
8187         if (rc == 0) {
8188                 val &= ~(0xff << BYTE_OFFSET(offset));
8189                 val |= (*data_buf << BYTE_OFFSET(offset));
8190
8191                 /* nvram data is returned as an array of bytes;
8192                  * convert it back to cpu order */
8193                 val = be32_to_cpu(val);
8194
8195                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8196                                              cmd_flags);
8197         }
8198
8199         /* disable access to nvram interface */
8200         bnx2x_disable_nvram_access(bp);
8201         bnx2x_release_nvram_lock(bp);
8202
8203         return rc;
8204 }
8205
8206 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8207                              int buf_size)
8208 {
8209         int rc;
8210         u32 cmd_flags;
8211         u32 val;
8212         u32 written_so_far;
8213
8214         if (buf_size == 1)      /* ethtool */
8215                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8216
8217         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8218                 DP(BNX2X_MSG_NVM,
8219                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8220                    offset, buf_size);
8221                 return -EINVAL;
8222         }
8223
8224         if (offset + buf_size > bp->common.flash_size) {
8225                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8226                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8227                    offset, buf_size, bp->common.flash_size);
8228                 return -EINVAL;
8229         }
8230
8231         /* request access to nvram interface */
8232         rc = bnx2x_acquire_nvram_lock(bp);
8233         if (rc)
8234                 return rc;
8235
8236         /* enable access to nvram interface */
8237         bnx2x_enable_nvram_access(bp);
8238
8239         written_so_far = 0;
8240         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8241         while ((written_so_far < buf_size) && (rc == 0)) {
8242                 if (written_so_far == (buf_size - sizeof(u32)))
8243                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8244                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8245                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8246                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8247                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8248
8249                 memcpy(&val, data_buf, 4);
8250
8251                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8252
8253                 /* advance to the next dword */
8254                 offset += sizeof(u32);
8255                 data_buf += sizeof(u32);
8256                 written_so_far += sizeof(u32);
8257                 cmd_flags = 0;
8258         }
8259
8260         /* disable access to nvram interface */
8261         bnx2x_disable_nvram_access(bp);
8262         bnx2x_release_nvram_lock(bp);
8263
8264         return rc;
8265 }
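/* Worked example of the cmd_flags sequencing above: writing 12 bytes
 * starting 4 bytes before a NVRAM_PAGE_SIZE boundary issues three
 * dwords flagged FIRST|LAST (the dword that closes the page), FIRST
 * (the dword that opens the next page) and LAST (the final dword of
 * the buffer) - the burst is broken at every page crossing as well as
 * at the end of the buffer.
 */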
8266
8267 static int bnx2x_set_eeprom(struct net_device *dev,
8268                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8269 {
8270         struct bnx2x *bp = netdev_priv(dev);
8271         int rc;
8272
8273         if (!netif_running(dev))
8274                 return -EAGAIN;
8275
8276         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8277            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8278            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8279            eeprom->len, eeprom->len);
8280
8281         /* parameters already validated in ethtool_set_eeprom */
8282
8283         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8284         if (eeprom->magic == 0x00504859)
8285                 if (bp->port.pmf) {
8286
8287                         bnx2x_acquire_phy_lock(bp);
8288                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8289                                              bp->link_params.ext_phy_config,
8290                                              (bp->state != BNX2X_STATE_CLOSED),
8291                                              eebuf, eeprom->len);
8292                         if ((bp->state == BNX2X_STATE_OPEN) ||
8293                             (bp->state == BNX2X_STATE_DISABLED)) {
8294                                 rc |= bnx2x_link_reset(&bp->link_params,
8295                                                        &bp->link_vars);
8296                                 rc |= bnx2x_phy_init(&bp->link_params,
8297                                                      &bp->link_vars);
8298                         }
8299                         bnx2x_release_phy_lock(bp);
8300
8301                 } else /* Only the PMF can access the PHY */
8302                         return -EINVAL;
8303         else
8304                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8305
8306         return rc;
8307 }
8308
8309 static int bnx2x_get_coalesce(struct net_device *dev,
8310                               struct ethtool_coalesce *coal)
8311 {
8312         struct bnx2x *bp = netdev_priv(dev);
8313
8314         memset(coal, 0, sizeof(struct ethtool_coalesce));
8315
8316         coal->rx_coalesce_usecs = bp->rx_ticks;
8317         coal->tx_coalesce_usecs = bp->tx_ticks;
8318
8319         return 0;
8320 }
8321
8322 static int bnx2x_set_coalesce(struct net_device *dev,
8323                               struct ethtool_coalesce *coal)
8324 {
8325         struct bnx2x *bp = netdev_priv(dev);
8326
8327         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8328         if (bp->rx_ticks > 3000)
8329                 bp->rx_ticks = 3000;
8330
8331         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8332         if (bp->tx_ticks > 0x3000)
8333                 bp->tx_ticks = 0x3000;
8334
8335         if (netif_running(dev))
8336                 bnx2x_update_coalesce(bp);
8337
8338         return 0;
8339 }
8340
8341 static void bnx2x_get_ringparam(struct net_device *dev,
8342                                 struct ethtool_ringparam *ering)
8343 {
8344         struct bnx2x *bp = netdev_priv(dev);
8345
8346         ering->rx_max_pending = MAX_RX_AVAIL;
8347         ering->rx_mini_max_pending = 0;
8348         ering->rx_jumbo_max_pending = 0;
8349
8350         ering->rx_pending = bp->rx_ring_size;
8351         ering->rx_mini_pending = 0;
8352         ering->rx_jumbo_pending = 0;
8353
8354         ering->tx_max_pending = MAX_TX_AVAIL;
8355         ering->tx_pending = bp->tx_ring_size;
8356 }
8357
8358 static int bnx2x_set_ringparam(struct net_device *dev,
8359                                struct ethtool_ringparam *ering)
8360 {
8361         struct bnx2x *bp = netdev_priv(dev);
8362         int rc = 0;
8363
8364         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8365             (ering->tx_pending > MAX_TX_AVAIL) ||
8366             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8367                 return -EINVAL;
8368
8369         bp->rx_ring_size = ering->rx_pending;
8370         bp->tx_ring_size = ering->tx_pending;
8371
8372         if (netif_running(dev)) {
8373                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8374                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8375         }
8376
8377         return rc;
8378 }
8379
8380 static void bnx2x_get_pauseparam(struct net_device *dev,
8381                                  struct ethtool_pauseparam *epause)
8382 {
8383         struct bnx2x *bp = netdev_priv(dev);
8384
8385         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8386                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8387
8388         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8389                             BNX2X_FLOW_CTRL_RX);
8390         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8391                             BNX2X_FLOW_CTRL_TX);
8392
8393         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8394            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8395            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8396 }
8397
8398 static int bnx2x_set_pauseparam(struct net_device *dev,
8399                                 struct ethtool_pauseparam *epause)
8400 {
8401         struct bnx2x *bp = netdev_priv(dev);
8402
8403         if (IS_E1HMF(bp))
8404                 return 0;
8405
8406         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8407            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8408            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8409
8410         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8411
8412         if (epause->rx_pause)
8413                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8414
8415         if (epause->tx_pause)
8416                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8417
8418         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8419                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8420
8421         if (epause->autoneg) {
8422                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8423                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8424                         return -EINVAL;
8425                 }
8426
8427                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8428                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8429         }
8430
8431         DP(NETIF_MSG_LINK,
8432            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8433
8434         if (netif_running(dev)) {
8435                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8436                 bnx2x_link_set(bp);
8437         }
8438
8439         return 0;
8440 }
8441
8442 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8443 {
8444         struct bnx2x *bp = netdev_priv(dev);
8445         int changed = 0;
8446         int rc = 0;
8447
8448         /* TPA requires Rx CSUM offloading */
8449         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8450                 if (!(dev->features & NETIF_F_LRO)) {
8451                         dev->features |= NETIF_F_LRO;
8452                         bp->flags |= TPA_ENABLE_FLAG;
8453                         changed = 1;
8454                 }
8455
8456         } else if (dev->features & NETIF_F_LRO) {
8457                 dev->features &= ~NETIF_F_LRO;
8458                 bp->flags &= ~TPA_ENABLE_FLAG;
8459                 changed = 1;
8460         }
8461
8462         if (changed && netif_running(dev)) {
8463                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8464                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8465         }
8466
8467         return rc;
8468 }
8469
8470 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8471 {
8472         struct bnx2x *bp = netdev_priv(dev);
8473
8474         return bp->rx_csum;
8475 }
8476
8477 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8478 {
8479         struct bnx2x *bp = netdev_priv(dev);
8480         int rc = 0;
8481
8482         bp->rx_csum = data;
8483
8484         /* Disable TPA when Rx CSUM is disabled; otherwise all
8485            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8486         if (!data) {
8487                 u32 flags = ethtool_op_get_flags(dev);
8488
8489                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8490         }
8491
8492         return rc;
8493 }
8494
8495 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8496 {
8497         if (data) {
8498                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8499                 dev->features |= NETIF_F_TSO6;
8500         } else {
8501                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8502                 dev->features &= ~NETIF_F_TSO6;
8503         }
8504
8505         return 0;
8506 }
8507
8508 static const struct {
8509         char string[ETH_GSTRING_LEN];
8510 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8511         { "register_test (offline)" },
8512         { "memory_test (offline)" },
8513         { "loopback_test (offline)" },
8514         { "nvram_test (online)" },
8515         { "interrupt_test (online)" },
8516         { "link_test (online)" },
8517         { "idle check (online)" },
8518         { "MC errors (online)" }
8519 };
8520
8521 static int bnx2x_self_test_count(struct net_device *dev)
8522 {
8523         return BNX2X_NUM_TESTS;
8524 }
8525
8526 static int bnx2x_test_registers(struct bnx2x *bp)
8527 {
8528         int idx, i, rc = -ENODEV;
8529         u32 wr_val = 0;
8530         int port = BP_PORT(bp);
8531         static const struct {
8532                 u32  offset0;
8533                 u32  offset1;
8534                 u32  mask;
8535         } reg_tbl[] = {
8536 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8537                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8538                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8539                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8540                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8541                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8542                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8543                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8544                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8545                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8546 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8547                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8548                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8549                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8550                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8551                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8552                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8553                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8554                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8555                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8556 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8557                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8558                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8559                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8560                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8561                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8562                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8563                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8564                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8565                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8566 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8567                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8568                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8569                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8570                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8571                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8572                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8573                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8574
8575                 { 0xffffffff, 0, 0x00000000 }
8576         };
8577
8578         if (!netif_running(bp->dev))
8579                 return rc;
8580
8581         /* Run the test twice:
8582            first writing 0x00000000, then writing 0xffffffff */
8583         for (idx = 0; idx < 2; idx++) {
8584
8585                 switch (idx) {
8586                 case 0:
8587                         wr_val = 0;
8588                         break;
8589                 case 1:
8590                         wr_val = 0xffffffff;
8591                         break;
8592                 }
8593
8594                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8595                         u32 offset, mask, save_val, val;
8596
8597                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8598                         mask = reg_tbl[i].mask;
8599
8600                         save_val = REG_RD(bp, offset);
8601
8602                         REG_WR(bp, offset, wr_val);
8603                         val = REG_RD(bp, offset);
8604
8605                         /* Restore the original register's value */
8606                         REG_WR(bp, offset, save_val);
8607
8608                         /* verify the value is as expected */
8609                         if ((val & mask) != (wr_val & mask))
8610                                 goto test_reg_exit;
8611                 }
8612         }
8613
8614         rc = 0;
8615
8616 test_reg_exit:
8617         return rc;
8618 }
8619
8620 static int bnx2x_test_memory(struct bnx2x *bp)
8621 {
8622         int i, j, rc = -ENODEV;
8623         u32 val;
8624         static const struct {
8625                 u32 offset;
8626                 int size;
8627         } mem_tbl[] = {
8628                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8629                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8630                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8631                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8632                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8633                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8634                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8635
8636                 { 0xffffffff, 0 }
8637         };
8638         static const struct {
8639                 char *name;
8640                 u32 offset;
8641                 u32 e1_mask;
8642                 u32 e1h_mask;
8643         } prty_tbl[] = {
8644                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8645                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8646                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8647                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8648                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8649                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8650
8651                 { NULL, 0xffffffff, 0, 0 }
8652         };
8653
8654         if (!netif_running(bp->dev))
8655                 return rc;
8656
8657         /* Go through all the memories */
8658         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8659                 for (j = 0; j < mem_tbl[i].size; j++)
8660                         REG_RD(bp, mem_tbl[i].offset + j*4);
8661
8662         /* Check the parity status */
8663         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8664                 val = REG_RD(bp, prty_tbl[i].offset);
8665                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8666                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8667                         DP(NETIF_MSG_HW,
8668                            "%s is 0x%x\n", prty_tbl[i].name, val);
8669                         goto test_mem_exit;
8670                 }
8671         }
8672
8673         rc = 0;
8674
8675 test_mem_exit:
8676         return rc;
8677 }
8678
8679 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8680 {
8681         int cnt = 1000;
8682
8683         if (link_up)
8684                 while (bnx2x_link_test(bp) && cnt--)
8685                         msleep(10);
8686 }
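/* The helper above polls bnx2x_link_test() every 10ms, giving the link
 * roughly 10 seconds (1000 * 10ms) to come back up.
 */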
8687
8688 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8689 {
8690         unsigned int pkt_size, num_pkts, i;
8691         struct sk_buff *skb;
8692         unsigned char *packet;
8693         struct bnx2x_fastpath *fp = &bp->fp[0];
8694         u16 tx_start_idx, tx_idx;
8695         u16 rx_start_idx, rx_idx;
8696         u16 pkt_prod;
8697         struct sw_tx_bd *tx_buf;
8698         struct eth_tx_bd *tx_bd;
8699         dma_addr_t mapping;
8700         union eth_rx_cqe *cqe;
8701         u8 cqe_fp_flags;
8702         struct sw_rx_bd *rx_buf;
8703         u16 len;
8704         int rc = -ENODEV;
8705
8706         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8707                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8708                 bnx2x_acquire_phy_lock(bp);
8709                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8710                 bnx2x_release_phy_lock(bp);
8711
8712         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8713                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8714                 bnx2x_acquire_phy_lock(bp);
8715                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8716                 bnx2x_release_phy_lock(bp);
8717                 /* wait until link state is restored */
8718                 bnx2x_wait_for_link(bp, link_up);
8719
8720         } else
8721                 return -EINVAL;
8722
8723         pkt_size = 1514;
8724         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8725         if (!skb) {
8726                 rc = -ENOMEM;
8727                 goto test_loopback_exit;
8728         }
8729         packet = skb_put(skb, pkt_size);
8730         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8731         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8732         for (i = ETH_HLEN; i < pkt_size; i++)
8733                 packet[i] = (unsigned char) (i & 0xff);
8734
8735         num_pkts = 0;
8736         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8737         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8738
8739         pkt_prod = fp->tx_pkt_prod++;
8740         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8741         tx_buf->first_bd = fp->tx_bd_prod;
8742         tx_buf->skb = skb;
8743
8744         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8745         mapping = pci_map_single(bp->pdev, skb->data,
8746                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8747         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8748         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8749         tx_bd->nbd = cpu_to_le16(1);
8750         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8751         tx_bd->vlan = cpu_to_le16(pkt_prod);
8752         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8753                                        ETH_TX_BD_FLAGS_END_BD);
8754         tx_bd->general_data = ((UNICAST_ADDRESS <<
8755                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8756
8757         wmb();
8758
8759         fp->hw_tx_prods->bds_prod =
8760                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8761         mb(); /* FW restriction: must not reorder writing nbd and packets */
8762         fp->hw_tx_prods->packets_prod =
8763                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8764         DOORBELL(bp, FP_IDX(fp), 0);
8765
8766         mmiowb();
8767
8768         num_pkts++;
8769         fp->tx_bd_prod++;
8770         bp->dev->trans_start = jiffies;
8771
8772         udelay(100);
8773
8774         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8775         if (tx_idx != tx_start_idx + num_pkts)
8776                 goto test_loopback_exit;
8777
8778         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8779         if (rx_idx != rx_start_idx + num_pkts)
8780                 goto test_loopback_exit;
8781
8782         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8783         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8784         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8785                 goto test_loopback_rx_exit;
8786
8787         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8788         if (len != pkt_size)
8789                 goto test_loopback_rx_exit;
8790
8791         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8792         skb = rx_buf->skb;
8793         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8794         for (i = ETH_HLEN; i < pkt_size; i++)
8795                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8796                         goto test_loopback_rx_exit;
8797
8798         rc = 0;
8799
8800 test_loopback_rx_exit:
8801
8802         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8803         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8804         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8805         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8806
8807         /* Update producers */
8808         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8809                              fp->rx_sge_prod);
8810
8811 test_loopback_exit:
8812         bp->link_params.loopback_mode = LOOPBACK_NONE;
8813
8814         return rc;
8815 }
8816
8817 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8818 {
8819         int rc = 0;
8820
8821         if (!netif_running(bp->dev))
8822                 return BNX2X_LOOPBACK_FAILED;
8823
8824         bnx2x_netif_stop(bp, 1);
8825
8826         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8827                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8828                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8829         }
8830
8831         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8832                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8833                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8834         }
8835
8836         bnx2x_netif_start(bp);
8837
8838         return rc;
8839 }
8840
8841 #define CRC32_RESIDUAL                  0xdebb20e3
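/* Standard CRC-32 property: when the (inverted, little-endian) CRC is
 * stored right after the data it covers, ether_crc_le() over
 * data-plus-CRC evaluates to this fixed residual - so each NVRAM
 * section below, which ends with its CRC, can be verified in a single
 * pass with no need to extract and compare the CRC explicitly.
 */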
8842
8843 static int bnx2x_test_nvram(struct bnx2x *bp)
8844 {
8845         static const struct {
8846                 int offset;
8847                 int size;
8848         } nvram_tbl[] = {
8849                 {     0,  0x14 }, /* bootstrap */
8850                 {  0x14,  0xec }, /* dir */
8851                 { 0x100, 0x350 }, /* manuf_info */
8852                 { 0x450,  0xf0 }, /* feature_info */
8853                 { 0x640,  0x64 }, /* upgrade_key_info */
8854                 { 0x6a4,  0x64 },
8855                 { 0x708,  0x70 }, /* manuf_key_info */
8856                 { 0x778,  0x70 },
8857                 {     0,     0 }
8858         };
8859         u32 buf[0x350 / 4];
8860         u8 *data = (u8 *)buf;
8861         int i, rc;
8862         u32 magic, csum;
8863
8864         rc = bnx2x_nvram_read(bp, 0, data, 4);
8865         if (rc) {
8866                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8867                 goto test_nvram_exit;
8868         }
8869
8870         magic = be32_to_cpu(buf[0]);
8871         if (magic != 0x669955aa) {
8872                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8873                 rc = -ENODEV;
8874                 goto test_nvram_exit;
8875         }
8876
8877         for (i = 0; nvram_tbl[i].size; i++) {
8878
8879                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8880                                       nvram_tbl[i].size);
8881                 if (rc) {
8882                         DP(NETIF_MSG_PROBE,
8883                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8884                         goto test_nvram_exit;
8885                 }
8886
8887                 csum = ether_crc_le(nvram_tbl[i].size, data);
8888                 if (csum != CRC32_RESIDUAL) {
8889                         DP(NETIF_MSG_PROBE,
8890                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8891                         rc = -ENODEV;
8892                         goto test_nvram_exit;
8893                 }
8894         }
8895
8896 test_nvram_exit:
8897         return rc;
8898 }
8899
8900 static int bnx2x_test_intr(struct bnx2x *bp)
8901 {
8902         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8903         int i, rc;
8904
8905         if (!netif_running(bp->dev))
8906                 return -ENODEV;
8907
8908         config->hdr.length_6b = 0;
8909         config->hdr.offset = 0;
8910         config->hdr.client_id = BP_CL_ID(bp);
8911         config->hdr.reserved1 = 0;
8912
8913         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8914                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8915                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8916         if (rc == 0) {
8917                 bp->set_mac_pending++;
8918                 for (i = 0; i < 10; i++) {
8919                         if (!bp->set_mac_pending)
8920                                 break;
8921                         msleep_interruptible(10);
8922                 }
8923                 if (i == 10)
8924                         rc = -ENODEV;
8925         }
8926
8927         return rc;
8928 }
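/* The interrupt test above works by posting an (effectively empty)
 * SET_MAC ramrod and waiting up to ~100ms (10 * 10ms) for the slowpath
 * completion interrupt to clear set_mac_pending; if it never does,
 * interrupt delivery is assumed broken and -ENODEV is returned.
 */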
8929
8930 static void bnx2x_self_test(struct net_device *dev,
8931                             struct ethtool_test *etest, u64 *buf)
8932 {
8933         struct bnx2x *bp = netdev_priv(dev);
8934
8935         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8936
8937         if (!netif_running(dev))
8938                 return;
8939
8940         /* offline tests are not supported in MF mode */
8941         if (IS_E1HMF(bp))
8942                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8943
8944         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8945                 u8 link_up;
8946
8947                 link_up = bp->link_vars.link_up;
8948                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8949                 bnx2x_nic_load(bp, LOAD_DIAG);
8950                 /* wait until link state is restored */
8951                 bnx2x_wait_for_link(bp, link_up);
8952
8953                 if (bnx2x_test_registers(bp) != 0) {
8954                         buf[0] = 1;
8955                         etest->flags |= ETH_TEST_FL_FAILED;
8956                 }
8957                 if (bnx2x_test_memory(bp) != 0) {
8958                         buf[1] = 1;
8959                         etest->flags |= ETH_TEST_FL_FAILED;
8960                 }
8961                 buf[2] = bnx2x_test_loopback(bp, link_up);
8962                 if (buf[2] != 0)
8963                         etest->flags |= ETH_TEST_FL_FAILED;
8964
8965                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8966                 bnx2x_nic_load(bp, LOAD_NORMAL);
8967                 /* wait until link state is restored */
8968                 bnx2x_wait_for_link(bp, link_up);
8969         }
8970         if (bnx2x_test_nvram(bp) != 0) {
8971                 buf[3] = 1;
8972                 etest->flags |= ETH_TEST_FL_FAILED;
8973         }
8974         if (bnx2x_test_intr(bp) != 0) {
8975                 buf[4] = 1;
8976                 etest->flags |= ETH_TEST_FL_FAILED;
8977         }
8978         if (bp->port.pmf)
8979                 if (bnx2x_link_test(bp) != 0) {
8980                         buf[5] = 1;
8981                         etest->flags |= ETH_TEST_FL_FAILED;
8982                 }
8983         buf[7] = bnx2x_mc_assert(bp);
8984         if (buf[7] != 0)
8985                 etest->flags |= ETH_TEST_FL_FAILED;
8986
8987 #ifdef BNX2X_EXTRA_DEBUG
8988         bnx2x_panic_dump(bp);
8989 #endif
8990 }
8991
8992 static const struct {
8993         long offset;
8994         int size;
8995         u32 flags;
8996 #define STATS_FLAGS_PORT                1
8997 #define STATS_FLAGS_FUNC                2
8998         u8 string[ETH_GSTRING_LEN];
8999 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9000 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9001                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
9002         { STATS_OFFSET32(error_bytes_received_hi),
9003                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9004         { STATS_OFFSET32(total_bytes_transmitted_hi),
9005                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
9006         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9007                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9008         { STATS_OFFSET32(total_unicast_packets_received_hi),
9009                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9010         { STATS_OFFSET32(total_multicast_packets_received_hi),
9011                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9012         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9013                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9014         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9015                                 8, STATS_FLAGS_FUNC, "tx_packets" },
9016         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9017                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9018 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9019                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9020         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9021                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9022         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9023                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9024         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9025                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9026         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9027                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9028         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9029                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9030         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9031                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9032         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9033                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9034         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9035                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9036         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9037                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9038 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9039                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9040         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9041                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9042         { STATS_OFFSET32(jabber_packets_received),
9043                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9044         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9045                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9046         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9047                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9048         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9049                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9050         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9051                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9052         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9053                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9054         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9055                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9056         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9057                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9058 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9059                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9060         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9061                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9062         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9063                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9064         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9065                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9066         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9067                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9068         { STATS_OFFSET32(mac_filter_discard),
9069                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9070         { STATS_OFFSET32(no_buff_discard),
9071                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9072         { STATS_OFFSET32(xxoverflow_discard),
9073                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9074         { STATS_OFFSET32(brb_drop_hi),
9075                                 8, STATS_FLAGS_PORT, "brb_discard" },
9076         { STATS_OFFSET32(brb_truncate_hi),
9077                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9078 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9079                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9080         { STATS_OFFSET32(rx_skb_alloc_failed),
9081                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9082 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9083                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9084 };
9085
9086 #define IS_NOT_E1HMF_STAT(bp, i) \
9087                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
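/* In E1H multi-function mode a physical port is shared by several
 * functions, so per-port MAC statistics are not reported; the macro
 * above filters the PORT-flagged entries out of the strings, the count
 * and the values alike.
 */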
9088
9089 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9090 {
9091         struct bnx2x *bp = netdev_priv(dev);
9092         int i, j;
9093
9094         switch (stringset) {
9095         case ETH_SS_STATS:
9096                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9097                         if (IS_NOT_E1HMF_STAT(bp, i))
9098                                 continue;
9099                         strcpy(buf + j*ETH_GSTRING_LEN,
9100                                bnx2x_stats_arr[i].string);
9101                         j++;
9102                 }
9103                 break;
9104
9105         case ETH_SS_TEST:
9106                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9107                 break;
9108         }
9109 }
9110
9111 static int bnx2x_get_stats_count(struct net_device *dev)
9112 {
9113         struct bnx2x *bp = netdev_priv(dev);
9114         int i, num_stats = 0;
9115
9116         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9117                 if (IS_NOT_E1HMF_STAT(bp, i))
9118                         continue;
9119                 num_stats++;
9120         }
9121         return num_stats;
9122 }
9123
9124 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9125                                     struct ethtool_stats *stats, u64 *buf)
9126 {
9127         struct bnx2x *bp = netdev_priv(dev);
9128         u32 *hw_stats = (u32 *)&bp->eth_stats;
9129         int i, j;
9130
9131         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9132                 if (IS_NOT_E1HMF_STAT(bp, i))
9133                         continue;
9134
9135                 if (bnx2x_stats_arr[i].size == 0) {
9136                         /* skip this counter */
9137                         buf[j] = 0;
9138                         j++;
9139                         continue;
9140                 }
9141                 if (bnx2x_stats_arr[i].size == 4) {
9142                         /* 4-byte counter */
9143                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9144                         j++;
9145                         continue;
9146                 }
9147                 /* 8-byte counter */
9148                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9149                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9150                 j++;
9151         }
9152 }
9153
9154 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9155 {
9156         struct bnx2x *bp = netdev_priv(dev);
9157         int port = BP_PORT(bp);
9158         int i;
9159
9160         if (!netif_running(dev))
9161                 return 0;
9162
9163         if (!bp->port.pmf)
9164                 return 0;
9165
9166         if (data == 0)
9167                 data = 2;
9168
9169         for (i = 0; i < (data * 2); i++) {
9170                 if ((i % 2) == 0)
9171                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9172                                       bp->link_params.hw_led_mode,
9173                                       bp->link_params.chip_id);
9174                 else
9175                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9176                                       bp->link_params.hw_led_mode,
9177                                       bp->link_params.chip_id);
9178
9179                 msleep_interruptible(500);
9180                 if (signal_pending(current))
9181                         break;
9182         }
9183
9184         if (bp->link_vars.link_up)
9185                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9186                               bp->link_vars.line_speed,
9187                               bp->link_params.hw_led_mode,
9188                               bp->link_params.chip_id);
9189
9190         return 0;
9191 }
9192
9193 static struct ethtool_ops bnx2x_ethtool_ops = {
9194         .get_settings           = bnx2x_get_settings,
9195         .set_settings           = bnx2x_set_settings,
9196         .get_drvinfo            = bnx2x_get_drvinfo,
9197         .get_wol                = bnx2x_get_wol,
9198         .set_wol                = bnx2x_set_wol,
9199         .get_msglevel           = bnx2x_get_msglevel,
9200         .set_msglevel           = bnx2x_set_msglevel,
9201         .nway_reset             = bnx2x_nway_reset,
9202         .get_link               = ethtool_op_get_link,
9203         .get_eeprom_len         = bnx2x_get_eeprom_len,
9204         .get_eeprom             = bnx2x_get_eeprom,
9205         .set_eeprom             = bnx2x_set_eeprom,
9206         .get_coalesce           = bnx2x_get_coalesce,
9207         .set_coalesce           = bnx2x_set_coalesce,
9208         .get_ringparam          = bnx2x_get_ringparam,
9209         .set_ringparam          = bnx2x_set_ringparam,
9210         .get_pauseparam         = bnx2x_get_pauseparam,
9211         .set_pauseparam         = bnx2x_set_pauseparam,
9212         .get_rx_csum            = bnx2x_get_rx_csum,
9213         .set_rx_csum            = bnx2x_set_rx_csum,
9214         .get_tx_csum            = ethtool_op_get_tx_csum,
9215         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9216         .set_flags              = bnx2x_set_flags,
9217         .get_flags              = ethtool_op_get_flags,
9218         .get_sg                 = ethtool_op_get_sg,
9219         .set_sg                 = ethtool_op_set_sg,
9220         .get_tso                = ethtool_op_get_tso,
9221         .set_tso                = bnx2x_set_tso,
9222         .self_test_count        = bnx2x_self_test_count,
9223         .self_test              = bnx2x_self_test,
9224         .get_strings            = bnx2x_get_strings,
9225         .phys_id                = bnx2x_phys_id,
9226         .get_stats_count        = bnx2x_get_stats_count,
9227         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9228 };
9229
9230 /* end of ethtool_ops */
9231
9232 /****************************************************************************
9233 * General service functions
9234 ****************************************************************************/
9235
9236 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9237 {
9238         u16 pmcsr;
9239
9240         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9241
9242         switch (state) {
9243         case PCI_D0:
9244                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9245                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9246                                        PCI_PM_CTRL_PME_STATUS));
9247
9248                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9249                         /* delay required during transition out of D3hot */
9250                         msleep(20);
9251                 break;
9252
9253         case PCI_D3hot:
9254                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
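                /* 3 == D3hot in the PCI_PM_CTRL_STATE_MASK field */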
9255                 pmcsr |= 3;
9256
9257                 if (bp->wol)
9258                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9259
9260                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9261                                       pmcsr);
9262
9263                 /* No more memory access after this point until
9264                  * the device is brought back to D0.
9265                  */
9266                 break;
9267
9268         default:
9269                 return -EINVAL;
9270         }
9271         return 0;
9272 }
9273
9274 /*
9275  * net_device service functions
9276  */
9277
9278 static int bnx2x_poll(struct napi_struct *napi, int budget)
9279 {
9280         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9281                                                  napi);
9282         struct bnx2x *bp = fp->bp;
9283         int work_done = 0;
9284         u16 rx_cons_sb;
9285
9286 #ifdef BNX2X_STOP_ON_ERROR
9287         if (unlikely(bp->panic))
9288                 goto poll_panic;
9289 #endif
9290
9291         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9292         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9293         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9294
9295         bnx2x_update_fpsb_idx(fp);
9296
9297         if (BNX2X_HAS_TX_WORK(fp))
9298                 bnx2x_tx_int(fp, budget);
9299
9300         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9301         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9302                 rx_cons_sb++;
9303         if (BNX2X_HAS_RX_WORK(fp))
9304                 work_done = bnx2x_rx_int(fp, budget);
9305
9306         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9307         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9308         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9309                 rx_cons_sb++;
9310
9311         /* must not complete if we consumed full budget */
9312         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9313
9314 #ifdef BNX2X_STOP_ON_ERROR
9315 poll_panic:
9316 #endif
9317                 netif_rx_complete(napi);
9318
9319                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9320                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9321                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9322                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9323         }
9324         return work_done;
9325 }
9326
9327
9328 /* We split the first BD into a headers BD and a data BD
9329  * to ease the pain of our fellow microcode engineers;
9330  * we use one mapping for both BDs.
9331  * So far this has only been observed to happen
9332  * in Other Operating Systems(TM).
9333  */
9334 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9335                                    struct bnx2x_fastpath *fp,
9336                                    struct eth_tx_bd **tx_bd, u16 hlen,
9337                                    u16 bd_prod, int nbd)
9338 {
9339         struct eth_tx_bd *h_tx_bd = *tx_bd;
9340         struct eth_tx_bd *d_tx_bd;
9341         dma_addr_t mapping;
9342         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9343
9344         /* first fix first BD */
9345         h_tx_bd->nbd = cpu_to_le16(nbd);
9346         h_tx_bd->nbytes = cpu_to_le16(hlen);
9347
9348         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9349            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9350            h_tx_bd->addr_lo, h_tx_bd->nbd);
9351
9352         /* now get a new data BD
9353          * (after the pbd) and fill it */
9354         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9355         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9356
9357         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9358                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9359
9360         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9361         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9362         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9363         d_tx_bd->vlan = 0;
9364         /* this marks the BD as one that has no individual mapping;
9365          * the FW ignores this flag in a BD not marked start
9366          */
9367         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9368         DP(NETIF_MSG_TX_QUEUED,
9369            "TSO split data size is %d (%x:%x)\n",
9370            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9371
9372         /* update tx_bd for marking the last BD flag */
9373         *tx_bd = d_tx_bd;
9374
9375         return bd_prod;
9376 }
9377
9378 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9379 {
9380         if (fix > 0)
9381                 csum = (u16) ~csum_fold(csum_sub(csum,
9382                                 csum_partial(t_header - fix, fix, 0)));
9383
9384         else if (fix < 0)
9385                 csum = (u16) ~csum_fold(csum_add(csum,
9386                                 csum_partial(t_header, -fix, 0)));
9387
9388         return swab16(csum);
9389 }
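/* Worked example for the helper above: with fix == 2 the two bytes just
 * before the transport header were wrongly folded into 'csum', so their
 * partial sum is subtracted back out; with fix == -2 two missing header
 * bytes are added in.  swab16() then returns the folded result in the
 * byte order the hardware expects.
 */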
9390
9391 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9392 {
9393         u32 rc;
9394
9395         if (skb->ip_summed != CHECKSUM_PARTIAL)
9396                 rc = XMIT_PLAIN;
9397
9398         else {
9399                 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9400                         rc = XMIT_CSUM_V6;
9401                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9402                                 rc |= XMIT_CSUM_TCP;
9403
9404                 } else {
9405                         rc = XMIT_CSUM_V4;
9406                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9407                                 rc |= XMIT_CSUM_TCP;
9408                 }
9409         }
9410
9411         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9412                 rc |= XMIT_GSO_V4;
9413
9414         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9415                 rc |= XMIT_GSO_V6;
9416
9417         return rc;
9418 }
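/* e.g. a CHECKSUM_PARTIAL TSO'ed IPv4/TCP skb yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a frame that needs
 * no offload at all maps to plain XMIT_PLAIN.
 */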
9419
9420 /* check if packet requires linearization (packet is too fragmented) */
9421 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9422                              u32 xmit_type)
9423 {
9424         int to_copy = 0;
9425         int hlen = 0;
9426         int first_bd_sz = 0;
9427
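        /* The FW cannot fetch more than MAX_FETCH_BD BDs per packet, and
         * for LSO it must be able to build every MSS-sized segment from a
         * window of wnd_size consecutive data BDs; slide that window over
         * the frags and request linearization if any window holds less
         * than one MSS.
         */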
9428         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9429         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9430
9431                 if (xmit_type & XMIT_GSO) {
9432                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9433                         /* Check if LSO packet needs to be copied:
9434                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9435                         int wnd_size = MAX_FETCH_BD - 3;
9436                         /* Number of windows to check */
9437                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9438                         int wnd_idx = 0;
9439                         int frag_idx = 0;
9440                         u32 wnd_sum = 0;
9441
9442                         /* Headers length */
9443                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9444                                 tcp_hdrlen(skb);
9445
9446                         /* data (w/o headers) in the linear part of the skb */
9447                         first_bd_sz = skb_headlen(skb) - hlen;
9448
9449                         wnd_sum  = first_bd_sz;
9450
9451                         /* Calculate the first sum - it's special */
9452                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9453                                 wnd_sum +=
9454                                         skb_shinfo(skb)->frags[frag_idx].size;
9455
9456                         /* If there was data on linear skb data - check it */
9457                         if (first_bd_sz > 0) {
9458                                 if (unlikely(wnd_sum < lso_mss)) {
9459                                         to_copy = 1;
9460                                         goto exit_lbl;
9461                                 }
9462
9463                                 wnd_sum -= first_bd_sz;
9464                         }
9465
9466                         /* Others are easier: run through the frag list and
9467                            check all windows */
9468                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9469                                 wnd_sum +=
9470                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9471
9472                                 if (unlikely(wnd_sum < lso_mss)) {
9473                                         to_copy = 1;
9474                                         break;
9475                                 }
9476                                 wnd_sum -=
9477                                         skb_shinfo(skb)->frags[wnd_idx].size;
9478                         }
9479
9480                 } else {
9481                         /* a non-LSO packet that is too fragmented must
9482                            always be linearized */
9483                         to_copy = 1;
9484                 }
9485         }
9486
9487 exit_lbl:
9488         if (unlikely(to_copy))
9489                 DP(NETIF_MSG_TX_QUEUED,
9490                    "Linearization IS REQUIRED for %s packet. "
9491                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9492                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9493                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9494
9495         return to_copy;
9496 }
9497
9498 /* called with netif_tx_lock
9499  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9500  * netif_wake_queue()
9501  */
9502 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9503 {
9504         struct bnx2x *bp = netdev_priv(dev);
9505         struct bnx2x_fastpath *fp;
9506         struct sw_tx_bd *tx_buf;
9507         struct eth_tx_bd *tx_bd;
9508         struct eth_tx_parse_bd *pbd = NULL;
9509         u16 pkt_prod, bd_prod;
9510         int nbd, fp_index;
9511         dma_addr_t mapping;
9512         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9513         int vlan_off = (bp->e1hov ? 4 : 0);
9514         int i;
9515         u8 hlen = 0;
9516
9517 #ifdef BNX2X_STOP_ON_ERROR
9518         if (unlikely(bp->panic))
9519                 return NETDEV_TX_BUSY;
9520 #endif
9521
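        /* spread transmits across the tx queues by submitting CPU */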
9522         fp_index = (smp_processor_id() % bp->num_queues);
9523         fp = &bp->fp[fp_index];
9524
9525         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9526                 bp->eth_stats.driver_xoff++;
9527                 netif_stop_queue(dev);
9528                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9529                 return NETDEV_TX_BUSY;
9530         }
9531
9532         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9533            "  gso type %x  xmit_type %x\n",
9534            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9535            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9536
9537         /* First, check if we need to linearize the skb
9538            (due to FW restrictions) */
9539         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9540                 /* Statistics of linearization */
9541                 bp->lin_cnt++;
9542                 if (skb_linearize(skb) != 0) {
9543                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9544                            "silently dropping this SKB\n");
9545                         dev_kfree_skb_any(skb);
9546                         return NETDEV_TX_OK;
9547                 }
9548         }
9549
9550         /*
9551          * Please read carefully.  First we use one BD which we mark as
9552          * start, then, for TSO or checksum offload, we have a parsing
9553          * info BD, and only then we have the rest of the TSO BDs.
9554          * (Don't forget to mark the last one as last, and to unmap only
9555          * AFTER you write to the BD ...)
9556          * And above all, all pbd sizes are in 16-bit words - NOT DWORDS!
9557          */
9558
9559         pkt_prod = fp->tx_pkt_prod++;
9560         bd_prod = TX_BD(fp->tx_bd_prod);
9561
9562         /* get a tx_buf and first BD */
9563         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9564         tx_bd = &fp->tx_desc_ring[bd_prod];
9565
9566         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9567         tx_bd->general_data = (UNICAST_ADDRESS <<
9568                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9569         /* header nbd */
9570         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9571
9572         /* remember the first BD of the packet */
9573         tx_buf->first_bd = fp->tx_bd_prod;
9574         tx_buf->skb = skb;
9575
9576         DP(NETIF_MSG_TX_QUEUED,
9577            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9578            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9579
9580 #ifdef BCM_VLAN
9581         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9582             (bp->flags & HW_VLAN_TX_FLAG)) {
9583                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9584                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9585                 vlan_off += 4;
9586         } else
9587 #endif
9588                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9589
9590         if (xmit_type) {
9591                 /* turn on parsing and get a BD */
9592                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9593                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9594
9595                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9596         }
9597
9598         if (xmit_type & XMIT_CSUM) {
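                /* all parse BD lengths are in 16-bit words; hlen starts
                 * as the MAC header length (including the VLAN offset
                 * the FW expects) */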
9599                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9600
9601                 /* for now NS flag is not used in Linux */
9602                 pbd->global_data = (hlen |
9603                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9604                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9605
9606                 pbd->ip_hlen = (skb_transport_header(skb) -
9607                                 skb_network_header(skb)) / 2;
9608
9609                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9610
9611                 pbd->total_hlen = cpu_to_le16(hlen);
9612                 hlen = hlen*2 - vlan_off;
9613
9614                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9615
9616                 if (xmit_type & XMIT_CSUM_V4)
9617                         tx_bd->bd_flags.as_bitfield |=
9618                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9619                 else
9620                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9621
9622                 if (xmit_type & XMIT_CSUM_TCP) {
9623                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9624
9625                 } else {
9626                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9627
9628                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9629                         pbd->cs_offset = fix / 2;
9630
9631                         DP(NETIF_MSG_TX_QUEUED,
9632                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9633                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9634                            SKB_CS(skb));
9635
9636                         /* HW bug: fixup the CSUM */
9637                         pbd->tcp_pseudo_csum =
9638                                 bnx2x_csum_fix(skb_transport_header(skb),
9639                                                SKB_CS(skb), fix);
9640
9641                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9642                            pbd->tcp_pseudo_csum);
9643                 }
9644         }
9645
9646         mapping = pci_map_single(bp->pdev, skb->data,
9647                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9648
9649         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9650         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
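        /* BD count: one start BD plus one per frag, plus the parse BD
         * when checksum or TSO offload is in use */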
9651         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9652         tx_bd->nbd = cpu_to_le16(nbd);
9653         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9654
9655         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9656            "  nbytes %d  flags %x  vlan %x\n",
9657            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9658            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9659            le16_to_cpu(tx_bd->vlan));
9660
9661         if (xmit_type & XMIT_GSO) {
9662
9663                 DP(NETIF_MSG_TX_QUEUED,
9664                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9665                    skb->len, hlen, skb_headlen(skb),
9666                    skb_shinfo(skb)->gso_size);
9667
9668                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9669
9670                 if (unlikely(skb_headlen(skb) > hlen))
9671                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9672                                                  bd_prod, ++nbd);
9673
9674                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9675                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9676                 pbd->tcp_flags = pbd_tcp_flags(skb);
9677
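                /* the pseudo-header checksum is seeded with a zero
                 * length; the PSEUDO_CS_WITHOUT_LEN flag set below marks
                 * it as computed without the length field */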
9678                 if (xmit_type & XMIT_GSO_V4) {
9679                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9680                         pbd->tcp_pseudo_csum =
9681                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9682                                                           ip_hdr(skb)->daddr,
9683                                                           0, IPPROTO_TCP, 0));
9684
9685                 } else
9686                         pbd->tcp_pseudo_csum =
9687                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9688                                                         &ipv6_hdr(skb)->daddr,
9689                                                         0, IPPROTO_TCP, 0));
9690
9691                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9692         }
9693
9694         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9695                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9696
9697                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9698                 tx_bd = &fp->tx_desc_ring[bd_prod];
9699
9700                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9701                                        frag->size, PCI_DMA_TODEVICE);
9702
9703                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9704                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9705                 tx_bd->nbytes = cpu_to_le16(frag->size);
9706                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9707                 tx_bd->bd_flags.as_bitfield = 0;
9708
9709                 DP(NETIF_MSG_TX_QUEUED,
9710                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9711                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9712                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9713         }
9714
9715         /* finally, mark the last BD of the packet */
9716         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9717
9718         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9719            tx_bd, tx_bd->bd_flags.as_bitfield);
9720
9721         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9722
9723         /* now send a tx doorbell; when the packet's BDs cross a page
9724          * boundary, the next-page pointer BD is consumed too and must
9725          * be counted in nbd */
9726         if (TX_BD_POFF(bd_prod) < nbd)
9727                 nbd++;
9728
9729         if (pbd)
9730                 DP(NETIF_MSG_TX_QUEUED,
9731                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9732                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9733                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9734                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9735                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9736
9737         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9738
9739         /*
9740          * Make sure that the BD data is updated before updating the producer
9741          * since FW might read the BD right after the producer is updated.
9742          * This is only applicable for weak-ordered memory model archs such
9743          * as IA-64. The following barrier is also mandatory since the FW
9744          * assumes packets must have BDs.
9745          */
9746         wmb();
9747
9748         fp->hw_tx_prods->bds_prod =
9749                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9750         mb(); /* FW restriction: must not reorder writing nbd and packets */
9751         fp->hw_tx_prods->packets_prod =
9752                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9753         DOORBELL(bp, FP_IDX(fp), 0);
9754
9755         mmiowb();
9756
9757         fp->tx_bd_prod += nbd;
9758         dev->trans_start = jiffies;
9759
9760         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9761                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9762                    if we put Tx into XOFF state. */
9763                 smp_mb();
9764                 netif_stop_queue(dev);
9765                 bp->eth_stats.driver_xoff++;
9766                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9767                         netif_wake_queue(dev);
9768         }
9769         fp->tx_pkt++;
9770
9771         return NETDEV_TX_OK;
9772 }
9773
9774 /* called with rtnl_lock */
9775 static int bnx2x_open(struct net_device *dev)
9776 {
9777         struct bnx2x *bp = netdev_priv(dev);
9778
9779         bnx2x_set_power_state(bp, PCI_D0);
9780
9781         return bnx2x_nic_load(bp, LOAD_OPEN);
9782 }
9783
9784 /* called with rtnl_lock */
9785 static int bnx2x_close(struct net_device *dev)
9786 {
9787         struct bnx2x *bp = netdev_priv(dev);
9788
9789         /* Unload the driver, release IRQs */
9790         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9791         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9792                 if (!CHIP_REV_IS_SLOW(bp))
9793                         bnx2x_set_power_state(bp, PCI_D3hot);
9794
9795         return 0;
9796 }
9797
9798 /* called with netif_tx_lock from set_multicast */
9799 static void bnx2x_set_rx_mode(struct net_device *dev)
9800 {
9801         struct bnx2x *bp = netdev_priv(dev);
9802         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9803         int port = BP_PORT(bp);
9804
9805         if (bp->state != BNX2X_STATE_OPEN) {
9806                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9807                 return;
9808         }
9809
9810         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9811
9812         if (dev->flags & IFF_PROMISC)
9813                 rx_mode = BNX2X_RX_MODE_PROMISC;
9814
9815         else if ((dev->flags & IFF_ALLMULTI) ||
9816                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9817                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9818
9819         else { /* some multicasts */
9820                 if (CHIP_IS_E1(bp)) {
9821                         int i, old, offset;
9822                         struct dev_mc_list *mclist;
9823                         struct mac_configuration_cmd *config =
9824                                                 bnx2x_sp(bp, mcast_config);
9825
9826                         for (i = 0, mclist = dev->mc_list;
9827                              mclist && (i < dev->mc_count);
9828                              i++, mclist = mclist->next) {
9829
9830                                 config->config_table[i].
9831                                         cam_entry.msb_mac_addr =
9832                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9833                                 config->config_table[i].
9834                                         cam_entry.middle_mac_addr =
9835                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9836                                 config->config_table[i].
9837                                         cam_entry.lsb_mac_addr =
9838                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9839                                 config->config_table[i].cam_entry.flags =
9840                                                         cpu_to_le16(port);
9841                                 config->config_table[i].
9842                                         target_table_entry.flags = 0;
9843                                 config->config_table[i].
9844                                         target_table_entry.client_id = 0;
9845                                 config->config_table[i].
9846                                         target_table_entry.vlan_id = 0;
9847
9848                                 DP(NETIF_MSG_IFUP,
9849                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9850                                    config->config_table[i].
9851                                                 cam_entry.msb_mac_addr,
9852                                    config->config_table[i].
9853                                                 cam_entry.middle_mac_addr,
9854                                    config->config_table[i].
9855                                                 cam_entry.lsb_mac_addr);
9856                         }
9857                         old = config->hdr.length_6b;
9858                         if (old > i) {
9859                                 for (; i < old; i++) {
9860                                         if (CAM_IS_INVALID(config->
9861                                                            config_table[i])) {
9862                                                 i--; /* already invalidated */
9863                                                 break;
9864                                         }
9865                                         /* invalidate */
9866                                         CAM_INVALIDATE(config->
9867                                                        config_table[i]);
9868                                 }
9869                         }
9870
9871                         if (CHIP_REV_IS_SLOW(bp))
9872                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9873                         else
9874                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9875
9876                         config->hdr.length_6b = i;
9877                         config->hdr.offset = offset;
9878                         config->hdr.client_id = BP_CL_ID(bp);
9879                         config->hdr.reserved1 = 0;
9880
9881                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9882                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9883                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9884                                       0);
9885                 } else { /* E1H */
9886                         /* Accept one or more multicasts */
9887                         struct dev_mc_list *mclist;
9888                         u32 mc_filter[MC_HASH_SIZE];
9889                         u32 crc, bit, regidx;
9890                         int i;
9891
9892                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9893
9894                         for (i = 0, mclist = dev->mc_list;
9895                              mclist && (i < dev->mc_count);
9896                              i++, mclist = mclist->next) {
9897
9898                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9899                                    mclist->dmi_addr);
9900
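                                /* 256-bit hash filter: the bit index is
                                 * the top byte of the CRC32c of the MAC */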
9901                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9902                                 bit = (crc >> 24) & 0xff;
9903                                 regidx = bit >> 5;
9904                                 bit &= 0x1f;
9905                                 mc_filter[regidx] |= (1 << bit);
9906                         }
9907
9908                         for (i = 0; i < MC_HASH_SIZE; i++)
9909                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9910                                        mc_filter[i]);
9911                 }
9912         }
9913
9914         bp->rx_mode = rx_mode;
9915         bnx2x_set_storm_rx_mode(bp);
9916 }
9917
9918 /* called with rtnl_lock */
9919 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9920 {
9921         struct sockaddr *addr = p;
9922         struct bnx2x *bp = netdev_priv(dev);
9923
9924         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9925                 return -EINVAL;
9926
9927         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9928         if (netif_running(dev)) {
9929                 if (CHIP_IS_E1(bp))
9930                         bnx2x_set_mac_addr_e1(bp, 1);
9931                 else
9932                         bnx2x_set_mac_addr_e1h(bp, 1);
9933         }
9934
9935         return 0;
9936 }
9937
9938 /* called with rtnl_lock */
9939 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9940 {
9941         struct mii_ioctl_data *data = if_mii(ifr);
9942         struct bnx2x *bp = netdev_priv(dev);
9943         int port = BP_PORT(bp);
9944         int err;
9945
9946         switch (cmd) {
9947         case SIOCGMIIPHY:
9948                 data->phy_id = bp->port.phy_addr;
9949
9950                 /* fallthrough */
9951
9952         case SIOCGMIIREG: {
9953                 u16 mii_regval;
9954
9955                 if (!netif_running(dev))
9956                         return -EAGAIN;
9957
9958                 mutex_lock(&bp->port.phy_mutex);
9959                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9960                                       DEFAULT_PHY_DEV_ADDR,
9961                                       (data->reg_num & 0x1f), &mii_regval);
9962                 data->val_out = mii_regval;
9963                 mutex_unlock(&bp->port.phy_mutex);
9964                 return err;
9965         }
9966
9967         case SIOCSMIIREG:
9968                 if (!capable(CAP_NET_ADMIN))
9969                         return -EPERM;
9970
9971                 if (!netif_running(dev))
9972                         return -EAGAIN;
9973
9974                 mutex_lock(&bp->port.phy_mutex);
9975                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9976                                        DEFAULT_PHY_DEV_ADDR,
9977                                        (data->reg_num & 0x1f), data->val_in);
9978                 mutex_unlock(&bp->port.phy_mutex);
9979                 return err;
9980
9981         default:
9982                 /* do nothing */
9983                 break;
9984         }
9985
9986         return -EOPNOTSUPP;
9987 }
9988
9989 /* called with rtnl_lock */
9990 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9991 {
9992         struct bnx2x *bp = netdev_priv(dev);
9993         int rc = 0;
9994
9995         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9996             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9997                 return -EINVAL;
9998
9999         /* This does not race with packet allocation
10000          * because the actual alloc size is
10001          * only updated as part of load
10002          */
10003         dev->mtu = new_mtu;
10004
10005         if (netif_running(dev)) {
10006                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10007                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10008         }
10009
10010         return rc;
10011 }
10012
10013 static void bnx2x_tx_timeout(struct net_device *dev)
10014 {
10015         struct bnx2x *bp = netdev_priv(dev);
10016
10017 #ifdef BNX2X_STOP_ON_ERROR
10018         if (!bp->panic)
10019                 bnx2x_panic();
10020 #endif
10021         /* This allows the netif to be shutdown gracefully before resetting */
10022         schedule_work(&bp->reset_task);
10023 }
10024
10025 #ifdef BCM_VLAN
10026 /* called with rtnl_lock */
10027 static void bnx2x_vlan_rx_register(struct net_device *dev,
10028                                    struct vlan_group *vlgrp)
10029 {
10030         struct bnx2x *bp = netdev_priv(dev);
10031
10032         bp->vlgrp = vlgrp;
10033
10034         /* Set flags according to the required capabilities */
10035         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10036
10037         if (dev->features & NETIF_F_HW_VLAN_TX)
10038                 bp->flags |= HW_VLAN_TX_FLAG;
10039
10040         if (dev->features & NETIF_F_HW_VLAN_RX)
10041                 bp->flags |= HW_VLAN_RX_FLAG;
10042
10043         if (netif_running(dev))
10044                 bnx2x_set_client_config(bp);
10045 }
10046
10047 #endif
10048
10049 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
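/* netpoll hook: service the NIC with its IRQ disabled so netconsole and
 * friends can make progress from atomic context
 */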
10050 static void poll_bnx2x(struct net_device *dev)
10051 {
10052         struct bnx2x *bp = netdev_priv(dev);
10053
10054         disable_irq(bp->pdev->irq);
10055         bnx2x_interrupt(bp->pdev->irq, dev);
10056         enable_irq(bp->pdev->irq);
10057 }
10058 #endif
10059
10060 static const struct net_device_ops bnx2x_netdev_ops = {
10061         .ndo_open               = bnx2x_open,
10062         .ndo_stop               = bnx2x_close,
10063         .ndo_start_xmit         = bnx2x_start_xmit,
10064         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10065         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10066         .ndo_validate_addr      = eth_validate_addr,
10067         .ndo_do_ioctl           = bnx2x_ioctl,
10068         .ndo_change_mtu         = bnx2x_change_mtu,
10069         .ndo_tx_timeout         = bnx2x_tx_timeout,
10070 #ifdef BCM_VLAN
10071         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10072 #endif
10073 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10074         .ndo_poll_controller    = poll_bnx2x,
10075 #endif
10076 };
10077
10078
10079 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10080                                     struct net_device *dev)
10081 {
10082         struct bnx2x *bp;
10083         int rc;
10084
10085         SET_NETDEV_DEV(dev, &pdev->dev);
10086         bp = netdev_priv(dev);
10087
10088         bp->dev = dev;
10089         bp->pdev = pdev;
10090         bp->flags = 0;
10091         bp->func = PCI_FUNC(pdev->devfn);
10092
10093         rc = pci_enable_device(pdev);
10094         if (rc) {
10095                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10096                 goto err_out;
10097         }
10098
10099         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10100                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10101                        " aborting\n");
10102                 rc = -ENODEV;
10103                 goto err_out_disable;
10104         }
10105
10106         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10107                 printk(KERN_ERR PFX "Cannot find second PCI device"
10108                        " base address, aborting\n");
10109                 rc = -ENODEV;
10110                 goto err_out_disable;
10111         }
10112
10113         if (atomic_read(&pdev->enable_cnt) == 1) {
10114                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10115                 if (rc) {
10116                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10117                                " aborting\n");
10118                         goto err_out_disable;
10119                 }
10120
10121                 pci_set_master(pdev);
10122                 pci_save_state(pdev);
10123         }
10124
10125         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10126         if (bp->pm_cap == 0) {
10127                 printk(KERN_ERR PFX "Cannot find power management"
10128                        " capability, aborting\n");
10129                 rc = -EIO;
10130                 goto err_out_release;
10131         }
10132
10133         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10134         if (bp->pcie_cap == 0) {
10135                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10136                        " aborting\n");
10137                 rc = -EIO;
10138                 goto err_out_release;
10139         }
10140
10141         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10142                 bp->flags |= USING_DAC_FLAG;
10143                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10144                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10145                                " failed, aborting\n");
10146                         rc = -EIO;
10147                         goto err_out_release;
10148                 }
10149
10150         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10151                 printk(KERN_ERR PFX "System does not support DMA,"
10152                        " aborting\n");
10153                 rc = -EIO;
10154                 goto err_out_release;
10155         }
10156
10157         dev->mem_start = pci_resource_start(pdev, 0);
10158         dev->base_addr = dev->mem_start;
10159         dev->mem_end = pci_resource_end(pdev, 0);
10160
10161         dev->irq = pdev->irq;
10162
10163         bp->regview = pci_ioremap_bar(pdev, 0);
10164         if (!bp->regview) {
10165                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10166                 rc = -ENOMEM;
10167                 goto err_out_release;
10168         }
10169
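        /* BAR 2 holds the doorbell space; map at most BNX2X_DB_SIZE of it */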
10170         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10171                                         min_t(u64, BNX2X_DB_SIZE,
10172                                               pci_resource_len(pdev, 2)));
10173         if (!bp->doorbells) {
10174                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10175                 rc = -ENOMEM;
10176                 goto err_out_unmap;
10177         }
10178
10179         bnx2x_set_power_state(bp, PCI_D0);
10180
10181         /* clean indirect addresses */
10182         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10183                                PCICFG_VENDOR_ID_OFFSET);
10184         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10185         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10186         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10187         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10188
10189         dev->watchdog_timeo = TX_TIMEOUT;
10190
10191         dev->netdev_ops = &bnx2x_netdev_ops;
10192         dev->ethtool_ops = &bnx2x_ethtool_ops;
10193         dev->features |= NETIF_F_SG;
10194         dev->features |= NETIF_F_HW_CSUM;
10195         if (bp->flags & USING_DAC_FLAG)
10196                 dev->features |= NETIF_F_HIGHDMA;
10197 #ifdef BCM_VLAN
10198         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10199         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10200 #endif
10201         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10202         dev->features |= NETIF_F_TSO6;
10203
10204         return 0;
10205
10206 err_out_unmap:
10207         if (bp->regview) {
10208                 iounmap(bp->regview);
10209                 bp->regview = NULL;
10210         }
10211         if (bp->doorbells) {
10212                 iounmap(bp->doorbells);
10213                 bp->doorbells = NULL;
10214         }
10215
10216 err_out_release:
10217         if (atomic_read(&pdev->enable_cnt) == 1)
10218                 pci_release_regions(pdev);
10219
10220 err_out_disable:
10221         pci_disable_device(pdev);
10222         pci_set_drvdata(pdev, NULL);
10223
10224 err_out:
10225         return rc;
10226 }
10227
10228 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10229 {
10230         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10231
10232         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10233         return val;
10234 }
10235
10236 /* return value of 1=2.5GHz 2=5GHz */
10237 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10238 {
10239         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10240
10241         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10242         return val;
10243 }
10244
10245 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10246                                     const struct pci_device_id *ent)
10247 {
10248         static int version_printed;
10249         struct net_device *dev = NULL;
10250         struct bnx2x *bp;
10251         int rc;
10252
10253         if (version_printed++ == 0)
10254                 printk(KERN_INFO "%s", version);
10255
10256         /* dev is zeroed in alloc_etherdev */
10257         dev = alloc_etherdev(sizeof(*bp));
10258         if (!dev) {
10259                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10260                 return -ENOMEM;
10261         }
10262
10263         bp = netdev_priv(dev);
10264         bp->msglevel = debug;
10265
10266         rc = bnx2x_init_dev(pdev, dev);
10267         if (rc < 0) {
10268                 free_netdev(dev);
10269                 return rc;
10270         }
10271
10272         rc = register_netdev(dev);
10273         if (rc) {
10274                 dev_err(&pdev->dev, "Cannot register net device\n");
10275                 goto init_one_exit;
10276         }
10277
10278         pci_set_drvdata(pdev, dev);
10279
10280         rc = bnx2x_init_bp(bp);
10281         if (rc) {
10282                 unregister_netdev(dev);
10283                 goto init_one_exit;
10284         }
10285
10286         netif_carrier_off(dev);
10287
10288         bp->common.name = board_info[ent->driver_data].name;
10289         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10290                " IRQ %d, ", dev->name, bp->common.name,
10291                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10292                bnx2x_get_pcie_width(bp),
10293                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10294                dev->base_addr, bp->pdev->irq);
10295         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10296         return 0;
10297
10298 init_one_exit:
10299         if (bp->regview)
10300                 iounmap(bp->regview);
10301
10302         if (bp->doorbells)
10303                 iounmap(bp->doorbells);
10304
10305         free_netdev(dev);
10306
10307         if (atomic_read(&pdev->enable_cnt) == 1)
10308                 pci_release_regions(pdev);
10309
10310         pci_disable_device(pdev);
10311         pci_set_drvdata(pdev, NULL);
10312
10313         return rc;
10314 }
10315
10316 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10317 {
10318         struct net_device *dev = pci_get_drvdata(pdev);
10319         struct bnx2x *bp;
10320
10321         if (!dev) {
10322                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10323                 return;
10324         }
10325         bp = netdev_priv(dev);
10326
10327         unregister_netdev(dev);
10328
10329         if (bp->regview)
10330                 iounmap(bp->regview);
10331
10332         if (bp->doorbells)
10333                 iounmap(bp->doorbells);
10334
10335         free_netdev(dev);
10336
10337         if (atomic_read(&pdev->enable_cnt) == 1)
10338                 pci_release_regions(pdev);
10339
10340         pci_disable_device(pdev);
10341         pci_set_drvdata(pdev, NULL);
10342 }
10343
10344 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10345 {
10346         struct net_device *dev = pci_get_drvdata(pdev);
10347         struct bnx2x *bp;
10348
10349         if (!dev) {
10350                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10351                 return -ENODEV;
10352         }
10353         bp = netdev_priv(dev);
10354
10355         rtnl_lock();
10356
10357         pci_save_state(pdev);
10358
10359         if (!netif_running(dev)) {
10360                 rtnl_unlock();
10361                 return 0;
10362         }
10363
10364         netif_device_detach(dev);
10365
10366         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10367
10368         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10369
10370         rtnl_unlock();
10371
10372         return 0;
10373 }
10374
10375 static int bnx2x_resume(struct pci_dev *pdev)
10376 {
10377         struct net_device *dev = pci_get_drvdata(pdev);
10378         struct bnx2x *bp;
10379         int rc;
10380
10381         if (!dev) {
10382                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10383                 return -ENODEV;
10384         }
10385         bp = netdev_priv(dev);
10386
10387         rtnl_lock();
10388
10389         pci_restore_state(pdev);
10390
10391         if (!netif_running(dev)) {
10392                 rtnl_unlock();
10393                 return 0;
10394         }
10395
10396         bnx2x_set_power_state(bp, PCI_D0);
10397         netif_device_attach(dev);
10398
10399         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10400
10401         rtnl_unlock();
10402
10403         return rc;
10404 }
10405
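/* Lightweight unload used by the PCI error handler: the device may be
 * inaccessible, so only stop the interface, release IRQs, invalidate the
 * E1 multicast CAM shadow and free driver memory - no ramrods are sent.
 */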
10406 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10407 {
10408         int i;
10409
10410         bp->state = BNX2X_STATE_ERROR;
10411
10412         bp->rx_mode = BNX2X_RX_MODE_NONE;
10413
10414         bnx2x_netif_stop(bp, 0);
10415
10416         del_timer_sync(&bp->timer);
10417         bp->stats_state = STATS_STATE_DISABLED;
10418         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10419
10420         /* Release IRQs */
10421         bnx2x_free_irq(bp);
10422
10423         if (CHIP_IS_E1(bp)) {
10424                 struct mac_configuration_cmd *config =
10425                                                 bnx2x_sp(bp, mcast_config);
10426
10427                 for (i = 0; i < config->hdr.length_6b; i++)
10428                         CAM_INVALIDATE(config->config_table[i]);
10429         }
10430
10431         /* Free SKBs, SGEs, TPA pool and driver internals */
10432         bnx2x_free_skbs(bp);
10433         for_each_queue(bp, i)
10434                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10435         bnx2x_free_mem(bp);
10436
10437         bp->state = BNX2X_STATE_CLOSED;
10438
10439         netif_carrier_off(bp->dev);
10440
10441         return 0;
10442 }
10443
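/* Re-read the shared memory base and MCP state after a slot reset so a
 * subsequent bnx2x_nic_load() can talk to the bootcode again.
 */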
10444 static void bnx2x_eeh_recover(struct bnx2x *bp)
10445 {
10446         u32 val;
10447
10448         mutex_init(&bp->port.phy_mutex);
10449
10450         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10451         bp->link_params.shmem_base = bp->common.shmem_base;
10452         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10453
10454         if (!bp->common.shmem_base ||
10455             (bp->common.shmem_base < 0xA0000) ||
10456             (bp->common.shmem_base >= 0xC0000)) {
10457                 BNX2X_DEV_INFO("MCP not active\n");
10458                 bp->flags |= NO_MCP_FLAG;
10459                 return;
10460         }
10461
10462         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10463         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10464                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10465                 BNX2X_ERR("BAD MCP validity signature\n");
10466
10467         if (!BP_NOMCP(bp)) {
10468                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10469                               & DRV_MSG_SEQ_NUMBER_MASK);
10470                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10471         }
10472 }
10473
10474 /**
10475  * bnx2x_io_error_detected - called when PCI error is detected
10476  * @pdev: Pointer to PCI device
10477  * @state: The current pci connection state
10478  *
10479  * This function is called after a PCI bus error affecting
10480  * this device has been detected.
10481  */
10482 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10483                                                 pci_channel_state_t state)
10484 {
10485         struct net_device *dev = pci_get_drvdata(pdev);
10486         struct bnx2x *bp = netdev_priv(dev);
10487
10488         rtnl_lock();
10489
10490         netif_device_detach(dev);
10491
10492         if (netif_running(dev))
10493                 bnx2x_eeh_nic_unload(bp);
10494
10495         pci_disable_device(pdev);
10496
10497         rtnl_unlock();
10498
10499         /* Request a slot reset */
10500         return PCI_ERS_RESULT_NEED_RESET;
10501 }
10502
10503 /**
10504  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10505  * @pdev: Pointer to PCI device
10506  *
10507  * Restart the card from scratch, as if from a cold-boot.
10508  */
10509 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10510 {
10511         struct net_device *dev = pci_get_drvdata(pdev);
10512         struct bnx2x *bp = netdev_priv(dev);
10513
10514         rtnl_lock();
10515
10516         if (pci_enable_device(pdev)) {
10517                 dev_err(&pdev->dev,
10518                         "Cannot re-enable PCI device after reset\n");
10519                 rtnl_unlock();
10520                 return PCI_ERS_RESULT_DISCONNECT;
10521         }
10522
10523         pci_set_master(pdev);
10524         pci_restore_state(pdev);
10525
10526         if (netif_running(dev))
10527                 bnx2x_set_power_state(bp, PCI_D0);
10528
10529         rtnl_unlock();
10530
10531         return PCI_ERS_RESULT_RECOVERED;
10532 }
10533
10534 /**
10535  * bnx2x_io_resume - called when traffic can start flowing again
10536  * @pdev: Pointer to PCI device
10537  *
10538  * This callback is called when the error recovery driver tells us that
10539  * it's OK to resume normal operation.
10540  */
10541 static void bnx2x_io_resume(struct pci_dev *pdev)
10542 {
10543         struct net_device *dev = pci_get_drvdata(pdev);
10544         struct bnx2x *bp = netdev_priv(dev);
10545
10546         rtnl_lock();
10547
10548         bnx2x_eeh_recover(bp);
10549
10550         if (netif_running(dev))
10551                 bnx2x_nic_load(bp, LOAD_NORMAL);
10552
10553         netif_device_attach(dev);
10554
10555         rtnl_unlock();
10556 }
10557
10558 static struct pci_error_handlers bnx2x_err_handler = {
10559         .error_detected = bnx2x_io_error_detected,
10560         .slot_reset = bnx2x_io_slot_reset,
10561         .resume = bnx2x_io_resume,
10562 };
10563
10564 static struct pci_driver bnx2x_pci_driver = {
10565         .name        = DRV_MODULE_NAME,
10566         .id_table    = bnx2x_pci_tbl,
10567         .probe       = bnx2x_init_one,
10568         .remove      = __devexit_p(bnx2x_remove_one),
10569         .suspend     = bnx2x_suspend,
10570         .resume      = bnx2x_resume,
10571         .err_handler = &bnx2x_err_handler,
10572 };
10573
10574 static int __init bnx2x_init(void)
10575 {
10576         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10577         if (bnx2x_wq == NULL) {
10578                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10579                 return -ENOMEM;
10580         }
10581
10582         return pci_register_driver(&bnx2x_pci_driver);
10583 }
10584
10585 static void __exit bnx2x_cleanup(void)
10586 {
10587         pci_unregister_driver(&bnx2x_pci_driver);
10588
10589         destroy_workqueue(bnx2x_wq);
10590 }
10591
10592 module_init(bnx2x_init);
10593 module_exit(bnx2x_cleanup);
10594