/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.23"
#define DRV_MODULE_RELDATE      "2008/11/03"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

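/* Companion read through the same PCI config window: the GRC address is
 * programmed via PCICFG_GRC_ADDRESS, the value is read back from
 * PCICFG_GRC_DATA, and the window is then restored to
 * PCICFG_VENDOR_ID_OFFSET so it is not left pointing at an arbitrary
 * register.
 */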
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

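/* DMA len32 dwords from host memory at dma_addr to GRC dst_addr.  Before
 * DMAE is ready the transfer falls back to indirect register writes.
 * Completion is detected by polling the write-back word (wb_comp) for
 * DMAE_COMP_VAL.
 */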
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

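/* Mirror of bnx2x_write_dmae(): DMA len32 dwords from GRC src_addr into
 * the slowpath write-back buffer (wb_data), falling back to indirect
 * register reads while DMAE is not ready.
 */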
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

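/* Scan the assert lists of the four STORM processors (XSTORM, TSTORM,
 * CSTORM, USTORM), printing each valid entry until the invalid-opcode
 * marker is reached; returns the number of asserts found.
 */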
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

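/* Dump the firmware log from the MCP scratchpad.  The buffer is treated
 * as cyclic: the saved mark (read from offset 0xf104) points at the
 * oldest data, so the dump runs from the mark to the end of the buffer
 * and then from the start of the buffer back up to the mark.
 */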
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

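/* Acknowledge a status block to the IGU: the ack is a single dword
 * combining the status block id, storm id, new index, update flag and
 * interrupt mode, written to the HC command register of this port.
 */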
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

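/* Number of TX BDs a caller may still consume.  "used" deliberately
 * includes the NUM_TX_RINGS next-page BDs so that those entries are never
 * handed out as data descriptors.
 */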
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


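/* Slowpath (ramrod) completion handler: a ramrod completion arriving on a
 * fastpath CQ returns a slot to the slowpath queue (spq_left) and advances
 * the per-fastpath or global state machine accordingly.
 */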
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

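/* Clear the mask bits of the two "next page" elements at the end of each
 * SGE ring page; these entries never carry buffers and must not be
 * counted as pending SGEs (see bnx2x_init_sge_ring_bit_mask() below).
 */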
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

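/* Start of a TPA (LRO) aggregation: the empty skb from the per-queue pool
 * takes over the producer slot, while the cons buffer holding the start of
 * the aggregated packet is parked in the pool until TPA stop.
 */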
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }
                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since the FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

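/* Main RX completion loop: walk the completion queue from sw_comp_cons up
 * to the HW index, dispatching slowpath CQEs to bnx2x_sp_event() and
 * handling TPA start/stop aggregation CQEs inline.
 */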
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
1480                                                 BNX2X_ERR("STOP on non-TCP "
1481                                                           "data\n");
1482
1483                                         /* This is the size of the linear data
1484                                            on this skb */
1485                                         len = le16_to_cpu(cqe->fast_path_cqe.
1486                                                                 len_on_bd);
1487                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1488                                                     len, cqe, comp_ring_cons);
1489 #ifdef BNX2X_STOP_ON_ERROR
1490                                         if (bp->panic)
1491                                                 return -EINVAL;
1492 #endif
1493
1494                                         bnx2x_update_sge_prod(fp,
1495                                                         &cqe->fast_path_cqe);
1496                                         goto next_cqe;
1497                                 }
1498                         }
1499
1500                         pci_dma_sync_single_for_device(bp->pdev,
1501                                         pci_unmap_addr(rx_buf, mapping),
1502                                                        pad + RX_COPY_THRESH,
1503                                                        PCI_DMA_FROMDEVICE);
1504                         prefetch(skb);
1505                         prefetch(((char *)(skb)) + 128);
1506
1507                         /* is this an error packet? */
1508                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1509                                 DP(NETIF_MSG_RX_ERR,
1510                                    "ERROR  flags %x  rx packet %u\n",
1511                                    cqe_fp_flags, sw_comp_cons);
1512                                 bp->eth_stats.rx_err_discard_pkt++;
1513                                 goto reuse_rx;
1514                         }
1515
1516                         /* Since we don't have a jumbo ring,
1517                          * copy small packets if the MTU > 1500
1518                          */
1519                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1520                             (len <= RX_COPY_THRESH)) {
1521                                 struct sk_buff *new_skb;
1522
1523                                 new_skb = netdev_alloc_skb(bp->dev,
1524                                                            len + pad);
1525                                 if (new_skb == NULL) {
1526                                         DP(NETIF_MSG_RX_ERR,
1527                                            "ERROR  packet dropped "
1528                                            "because of alloc failure\n");
1529                                         bp->eth_stats.rx_skb_alloc_failed++;
1530                                         goto reuse_rx;
1531                                 }
1532
1533                                 /* aligned copy */
1534                                 skb_copy_from_linear_data_offset(skb, pad,
1535                                                     new_skb->data + pad, len);
1536                                 skb_reserve(new_skb, pad);
1537                                 skb_put(new_skb, len);
1538
1539                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1540
1541                                 skb = new_skb;
1542
1543                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1544                                 pci_unmap_single(bp->pdev,
1545                                         pci_unmap_addr(rx_buf, mapping),
1546                                                  bp->rx_buf_size,
1547                                                  PCI_DMA_FROMDEVICE);
1548                                 skb_reserve(skb, pad);
1549                                 skb_put(skb, len);
1550
1551                         } else {
1552                                 DP(NETIF_MSG_RX_ERR,
1553                                    "ERROR  packet dropped because "
1554                                    "of alloc failure\n");
1555                                 bp->eth_stats.rx_skb_alloc_failed++;
1556 reuse_rx:
1557                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1558                                 goto next_rx;
1559                         }
1560
1561                         skb->protocol = eth_type_trans(skb, bp->dev);
1562
1563                         skb->ip_summed = CHECKSUM_NONE;
1564                         if (bp->rx_csum) {
1565                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1566                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1567                                 else
1568                                         bp->eth_stats.hw_csum_err++;
1569                         }
1570                 }
1571
1572 #ifdef BCM_VLAN
1573                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1574                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1575                      PARSING_FLAGS_VLAN))
1576                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1577                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1578                 else
1579 #endif
1580                         netif_receive_skb(skb);
1581
1582
1583 next_rx:
1584                 rx_buf->skb = NULL;
1585
1586                 bd_cons = NEXT_RX_IDX(bd_cons);
1587                 bd_prod = NEXT_RX_IDX(bd_prod);
1588                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1589                 rx_pkt++;
1590 next_cqe:
1591                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1592                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1593
1594                 if (rx_pkt == budget)
1595                         break;
1596         } /* while */
1597
1598         fp->rx_bd_cons = bd_cons;
1599         fp->rx_bd_prod = bd_prod_fw;
1600         fp->rx_comp_cons = sw_comp_cons;
1601         fp->rx_comp_prod = sw_comp_prod;
1602
1603         /* Update producers */
1604         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1605                              fp->rx_sge_prod);
1606
1607         fp->rx_pkt += rx_pkt;
1608         fp->rx_calls++;
1609
1610         return rx_pkt;
1611 }
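
/* Editor's note -- an illustrative sketch of the "next element" skip
 * seen on hw_comp_cons above: the last entry of each completion-queue
 * page is a next-page pointer, so consumer indices hop over it.  The
 * real arithmetic lives in the NEXT_RCQ_IDX()/MAX_RCQ_DESC_CNT macros
 * in bnx2x.h; the mask parameter here is hypothetical.
 */
#if 0
static u16 example_next_cq_idx(u16 idx, u16 per_page_mask)
{
        idx++;
        if ((idx & per_page_mask) == per_page_mask)
                idx++;          /* skip the next-page element */
        return idx;
}
#endif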
1612
1613 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1614 {
1615         struct bnx2x_fastpath *fp = fp_cookie;
1616         struct bnx2x *bp = fp->bp;
1617         int index = FP_IDX(fp);
1618
1619         /* Return here if interrupt is disabled */
1620         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1621                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1622                 return IRQ_HANDLED;
1623         }
1624
1625         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1626            index, FP_SB_ID(fp));
1627         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1628
1629 #ifdef BNX2X_STOP_ON_ERROR
1630         if (unlikely(bp->panic))
1631                 return IRQ_HANDLED;
1632 #endif
1633
1634         prefetch(fp->rx_cons_sb);
1635         prefetch(fp->tx_cons_sb);
1636         prefetch(&fp->status_blk->c_status_block.status_block_index);
1637         prefetch(&fp->status_blk->u_status_block.status_block_index);
1638
1639         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1640
1641         return IRQ_HANDLED;
1642 }
1643
1644 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1645 {
1646         struct net_device *dev = dev_instance;
1647         struct bnx2x *bp = netdev_priv(dev);
1648         u16 status = bnx2x_ack_int(bp);
1649         u16 mask;
1650
1651         /* Return here if interrupt is shared and it's not for us */
1652         if (unlikely(status == 0)) {
1653                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1654                 return IRQ_NONE;
1655         }
1656         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1657
1658         /* Return here if interrupt is disabled */
1659         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1660                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1661                 return IRQ_HANDLED;
1662         }
1663
1664 #ifdef BNX2X_STOP_ON_ERROR
1665         if (unlikely(bp->panic))
1666                 return IRQ_HANDLED;
1667 #endif
1668
1669         mask = 0x2 << bp->fp[0].sb_id;
1670         if (status & mask) {
1671                 struct bnx2x_fastpath *fp = &bp->fp[0];
1672
1673                 prefetch(fp->rx_cons_sb);
1674                 prefetch(fp->tx_cons_sb);
1675                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1676                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1677
1678                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1679
1680                 status &= ~mask;
1681         }
1682
1683
1684         if (unlikely(status & 0x1)) {
1685                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1686
1687                 status &= ~0x1;
1688                 if (!status)
1689                         return IRQ_HANDLED;
1690         }
1691
1692         if (status)
1693                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1694                    status);
1695
1696         return IRQ_HANDLED;
1697 }
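
/* Editor's note: in the INT#A handler above, bit 0 of the acked status
 * selects the default (slowpath) status block and bit (1 + sb_id)
 * selects a fastpath one, hence the "0x2 << sb_id" mask.  Worked
 * example: sb_id 0 -> mask 0x2, sb_id 3 -> mask 0x10.
 */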
1698
1699 /* end of fast path */
1700
1701 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1702
1703 /* Link */
1704
1705 /*
1706  * General service functions
1707  */
1708
1709 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1710 {
1711         u32 lock_status;
1712         u32 resource_bit = (1 << resource);
1713         int func = BP_FUNC(bp);
1714         u32 hw_lock_control_reg;
1715         int cnt;
1716
1717         /* Validating that the resource is within range */
1718         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1719                 DP(NETIF_MSG_HW,
1720                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1721                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1722                 return -EINVAL;
1723         }
1724
1725         if (func <= 5) {
1726                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1727         } else {
1728                 hw_lock_control_reg =
1729                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1730         }
1731
1732         /* Validating that the resource is not already taken */
1733         lock_status = REG_RD(bp, hw_lock_control_reg);
1734         if (lock_status & resource_bit) {
1735                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1736                    lock_status, resource_bit);
1737                 return -EEXIST;
1738         }
1739
1740         /* Try for 5 seconds, polling every 5ms */
1741         for (cnt = 0; cnt < 1000; cnt++) {
1742                 /* Try to acquire the lock */
1743                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1744                 lock_status = REG_RD(bp, hw_lock_control_reg);
1745                 if (lock_status & resource_bit)
1746                         return 0;
1747
1748                 msleep(5);
1749         }
1750         DP(NETIF_MSG_HW, "Timeout\n");
1751         return -EAGAIN;
1752 }
1753
1754 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1755 {
1756         u32 lock_status;
1757         u32 resource_bit = (1 << resource);
1758         int func = BP_FUNC(bp);
1759         u32 hw_lock_control_reg;
1760
1761         /* Validating that the resource is within range */
1762         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1763                 DP(NETIF_MSG_HW,
1764                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1765                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1766                 return -EINVAL;
1767         }
1768
1769         if (func <= 5) {
1770                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1771         } else {
1772                 hw_lock_control_reg =
1773                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1774         }
1775
1776         /* Validating that the resource is currently taken */
1777         lock_status = REG_RD(bp, hw_lock_control_reg);
1778         if (!(lock_status & resource_bit)) {
1779                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1780                    lock_status, resource_bit);
1781                 return -EFAULT;
1782         }
1783
1784         REG_WR(bp, hw_lock_control_reg, resource_bit);
1785         return 0;
1786 }
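
/* Editor's sketch of how the two helpers above are meant to pair up
 * (illustrative only; the resource id is arbitrary):
 *
 *      if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *              ... touch the shared resource ...
 *              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      }
 */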
1787
1788 /* HW Lock for shared dual port PHYs */
1789 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1790 {
1791         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1792
1793         mutex_lock(&bp->port.phy_mutex);
1794
1795         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1796             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1797                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1798 }
1799
1800 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1801 {
1802         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1803
1804         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1805             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1806                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1807
1808         mutex_unlock(&bp->port.phy_mutex);
1809 }
1810
1811 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1812 {
1813         /* The GPIO should be swapped if the swap register is set and active */
1814         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1815                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1816         int gpio_shift = gpio_num +
1817                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1818         u32 gpio_mask = (1 << gpio_shift);
1819         u32 gpio_reg;
1820
1821         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1822                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1823                 return -EINVAL;
1824         }
1825
1826         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1827         /* read GPIO and mask out all but the float bits */
1828         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1829
1830         switch (mode) {
1831         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1832                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1833                    gpio_num, gpio_shift);
1834                 /* clear FLOAT and set CLR */
1835                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1836                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1837                 break;
1838
1839         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1840                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1841                    gpio_num, gpio_shift);
1842                 /* clear FLOAT and set SET */
1843                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1844                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1845                 break;
1846
1847         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1848                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1849                    gpio_num, gpio_shift);
1850                 /* set FLOAT */
1851                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1852                 break;
1853
1854         default:
1855                 break;
1856         }
1857
1858         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1859         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1860
1861         return 0;
1862 }
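
/* Editor's note: a worked example of the shift math above, assuming
 * MISC_REGISTERS_GPIO_PORT_SHIFT is 4 as in the in-tree headers:
 * GPIO 2 on the swapped port gives gpio_shift = 2 + 4 = 6, so
 * gpio_mask = 1 << 6 = 0x40.
 */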
1863
1864 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1865 {
1866         u32 spio_mask = (1 << spio_num);
1867         u32 spio_reg;
1868
1869         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1870             (spio_num > MISC_REGISTERS_SPIO_7)) {
1871                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1872                 return -EINVAL;
1873         }
1874
1875         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1876         /* read SPIO and mask out all but the float bits */
1877         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1878
1879         switch (mode) {
1880         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1881                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1882                 /* clear FLOAT and set CLR */
1883                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1884                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1885                 break;
1886
1887         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1888                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1889                 /* clear FLOAT and set SET */
1890                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1891                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1892                 break;
1893
1894         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1895                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1896                 /* set FLOAT */
1897                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1898                 break;
1899
1900         default:
1901                 break;
1902         }
1903
1904         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1905         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1906
1907         return 0;
1908 }
1909
1910 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1911 {
1912         switch (bp->link_vars.ieee_fc &
1913                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1914         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1915                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1916                                           ADVERTISED_Pause);
1917                 break;
1918         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1919                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1920                                          ADVERTISED_Pause);
1921                 break;
1922         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1923                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1924                 break;
1925         default:
1926                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927                                           ADVERTISED_Pause);
1928                 break;
1929         }
1930 }
1931
1932 static void bnx2x_link_report(struct bnx2x *bp)
1933 {
1934         if (bp->link_vars.link_up) {
1935                 if (bp->state == BNX2X_STATE_OPEN)
1936                         netif_carrier_on(bp->dev);
1937                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1938
1939                 printk("%d Mbps ", bp->link_vars.line_speed);
1940
1941                 if (bp->link_vars.duplex == DUPLEX_FULL)
1942                         printk("full duplex");
1943                 else
1944                         printk("half duplex");
1945
1946                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1947                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1948                                 printk(", receive ");
1949                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1950                                         printk("& transmit ");
1951                         } else {
1952                                 printk(", transmit ");
1953                         }
1954                         printk("flow control ON");
1955                 }
1956                 printk("\n");
1957
1958         } else { /* link_down */
1959                 netif_carrier_off(bp->dev);
1960                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1961         }
1962 }
1963
1964 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1965 {
1966         if (!BP_NOMCP(bp)) {
1967                 u8 rc;
1968
1969                 /* Initialize link parameters structure variables */
1970                 /* It is recommended to turn off RX FC for jumbo frames
1971                    for better performance */
1972                 if (IS_E1HMF(bp))
1973                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1974                 else if (bp->dev->mtu > 5000)
1975                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1976                 else
1977                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1978
1979                 bnx2x_acquire_phy_lock(bp);
1980                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1981                 bnx2x_release_phy_lock(bp);
1982
1983                 bnx2x_calc_fc_adv(bp);
1984
1985                 if (bp->link_vars.link_up)
1986                         bnx2x_link_report(bp);
1987
1988
1989                 return rc;
1990         }
1991         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1992         return -EINVAL;
1993 }
1994
1995 static void bnx2x_link_set(struct bnx2x *bp)
1996 {
1997         if (!BP_NOMCP(bp)) {
1998                 bnx2x_acquire_phy_lock(bp);
1999                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2000                 bnx2x_release_phy_lock(bp);
2001
2002                 bnx2x_calc_fc_adv(bp);
2003         } else
2004                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2005 }
2006
2007 static void bnx2x__link_reset(struct bnx2x *bp)
2008 {
2009         if (!BP_NOMCP(bp)) {
2010                 bnx2x_acquire_phy_lock(bp);
2011                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2012                 bnx2x_release_phy_lock(bp);
2013         } else
2014                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2015 }
2016
2017 static u8 bnx2x_link_test(struct bnx2x *bp)
2018 {
2019         u8 rc;
2020
2021         bnx2x_acquire_phy_lock(bp);
2022         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2023         bnx2x_release_phy_lock(bp);
2024
2025         return rc;
2026 }
2027
2028 /* Calculates the sum of vn_min_rates.
2029    It's needed for further normalizing of the min_rates.
2030
2031    Returns:
2032      sum of vn_min_rates
2033        or
2034      0 - if all the min_rates are 0.
2035      In the latter case the fairness algorithm should be deactivated.
2036      If not all min_rates are zero, those that are zero will
2037      be set to 1.
2038  */
2039 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2040 {
2041         int i, port = BP_PORT(bp);
2042         u32 wsum = 0;
2043         int all_zero = 1;
2044
2045         for (i = 0; i < E1HVN_MAX; i++) {
2046                 u32 vn_cfg =
2047                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2048                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2049                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2050                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2051                         /* If min rate is zero - set it to 1 */
2052                         if (!vn_min_rate)
2053                                 vn_min_rate = DEF_MIN_RATE;
2054                         else
2055                                 all_zero = 0;
2056
2057                         wsum += vn_min_rate;
2058                 }
2059         }
2060
2061         /* ... only if all min rates are zero - disable FAIRNESS */
2062         if (all_zero)
2063                 return 0;
2064
2065         return wsum;
2066 }
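
/* Editor's example with illustrative values: min BW fields of
 * 30, 0, 20 and 0 yield per-VN rates of 3000, DEF_MIN_RATE, 2000 and
 * DEF_MIN_RATE, and wsum is their sum; if all four fields are zero
 * the function returns 0 and fairness is disabled.
 */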
2067
2068 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2069                                    int en_fness,
2070                                    u16 port_rate,
2071                                    struct cmng_struct_per_port *m_cmng_port)
2072 {
2073         u32 r_param = port_rate / 8;
2074         int port = BP_PORT(bp);
2075         int i;
2076
2077         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2078
2079         /* Enable minmax only if we are in e1hmf mode */
2080         if (IS_E1HMF(bp)) {
2081                 u32 fair_periodic_timeout_usec;
2082                 u32 t_fair;
2083
2084                 /* Enable rate shaping and fairness */
2085                 m_cmng_port->flags.cmng_vn_enable = 1;
2086                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2087                 m_cmng_port->flags.rate_shaping_enable = 1;
2088
2089                 if (!en_fness)
2090                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2091                            "  fairness will be disabled\n");
2092
2093                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2094                 m_cmng_port->rs_vars.rs_periodic_timeout =
2095                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2096
2097                 /* this is the threshold below which no timer arming will occur;
2098                    the 1.25 coefficient makes the threshold a little bigger
2099                    than the real time, to compensate for timer inaccuracy */
2100                 m_cmng_port->rs_vars.rs_threshold =
2101                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2102
2103                 /* resolution of fairness timer */
2104                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2105                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2106                 t_fair = T_FAIR_COEF / port_rate;
2107
2108                 /* this is the threshold below which we won't arm
2109                    the timer anymore */
2110                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2111
2112                 /* we multiply by 1e3/8 to get bytes/msec.
2113                    We don't want the credits to exceed
2114                    T_FAIR*FAIR_MEM (the algorithm's resolution) */
2115                 m_cmng_port->fair_vars.upper_bound =
2116                                                 r_param * t_fair * FAIR_MEM;
2117                 /* since each tick is 4 usec */
2118                 m_cmng_port->fair_vars.fairness_timeout =
2119                                                 fair_periodic_timeout_usec / 4;
2120
2121         } else {
2122                 /* Disable rate shaping and fairness */
2123                 m_cmng_port->flags.cmng_vn_enable = 0;
2124                 m_cmng_port->flags.fairness_enable = 0;
2125                 m_cmng_port->flags.rate_shaping_enable = 0;
2126
2127                 DP(NETIF_MSG_IFUP,
2128                    "Single function mode  minmax will be disabled\n");
2129         }
2130
2131         /* Store it to internal memory */
2132         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2133                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2134                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2135                        ((u32 *)(m_cmng_port))[i]);
2136 }
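
/* Editor's example of the timer math above, assuming
 * RS_PERIODIC_TIMEOUT_USEC = 100 and T_FAIR_COEF = 10000000 as the
 * surrounding comments imply: on a 10000 Mbps port r_param = 1250,
 * so rs_threshold = (100 * 1250 * 5) / 4 = 156250 (the 1.25 factor)
 * and t_fair = 10000000 / 10000 = 1000 usec.
 */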
2137
2138 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2139                                    u32 wsum, u16 port_rate,
2140                                  struct cmng_struct_per_port *m_cmng_port)
2141 {
2142         struct rate_shaping_vars_per_vn m_rs_vn;
2143         struct fairness_vars_per_vn m_fair_vn;
2144         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2145         u16 vn_min_rate, vn_max_rate;
2146         int i;
2147
2148         /* If the function is hidden - set min and max to zero */
2149         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2150                 vn_min_rate = 0;
2151                 vn_max_rate = 0;
2152
2153         } else {
2154                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2155                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2156                 /* If FAIRNESS is enabled (not all min rates are zero) and
2157                    the current min rate is zero - set it to 1.
2158                    This is a requirement of the algorithm. */
2159                 if ((vn_min_rate == 0) && wsum)
2160                         vn_min_rate = DEF_MIN_RATE;
2161                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2162                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2163         }
2164
2165         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2166            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2167
2168         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2169         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2170
2171         /* global vn counter - maximal Mbps for this vn */
2172         m_rs_vn.vn_counter.rate = vn_max_rate;
2173
2174         /* quota - number of bytes transmitted in this period */
2175         m_rs_vn.vn_counter.quota =
2176                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2177
2178 #ifdef BNX2X_PER_PROT_QOS
2179         /* per protocol counter */
2180         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2181                 /* maximal Mbps for this protocol */
2182                 m_rs_vn.protocol_counters[protocol].rate =
2183                                                 protocol_max_rate[protocol];
2184                 /* the quota in each timer period -
2185                    number of bytes transmitted in this period */
2186                 m_rs_vn.protocol_counters[protocol].quota =
2187                         (u32)(rs_periodic_timeout_usec *
2188                           ((double)m_rs_vn.
2189                                    protocol_counters[protocol].rate/8));
2190         }
2191 #endif
2192
2193         if (wsum) {
2194                 /* credit for each period of the fairness algorithm:
2195                    number of bytes in T_FAIR (the VNs share the port rate).
2196                    wsum should not be larger than 10000, thus
2197                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2198                 m_fair_vn.vn_credit_delta =
2199                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2200                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2201                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2202                    m_fair_vn.vn_credit_delta);
2203         }
2204
2205 #ifdef BNX2X_PER_PROT_QOS
2206         do {
2207                 u32 protocolWeightSum = 0;
2208
2209                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2210                         protocolWeightSum +=
2211                                         drvInit.protocol_min_rate[protocol];
2212                 /* per protocol counter -
2213                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2214                 if (protocolWeightSum > 0) {
2215                         for (protocol = 0;
2216                              protocol < NUM_OF_PROTOCOLS; protocol++)
2217                                 /* credit for each period of the
2218                                    fairness algorithm - number of bytes in
2219                                    T_FAIR (the protocol share the vn rate) */
2220                                 m_fair_vn.protocol_credit_delta[protocol] =
2221                                         (u32)((vn_min_rate / 8) * t_fair *
2222                                         protocol_min_rate / protocolWeightSum);
2223                 }
2224         } while (0);
2225 #endif
2226
2227         /* Store it to internal memory */
2228         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2229                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2230                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2231                        ((u32 *)(&m_rs_vn))[i]);
2232
2233         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2234                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2235                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2236                        ((u32 *)(&m_fair_vn))[i]);
2237 }
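
/* Editor's example of the credit formula above (same assumed
 * T_FAIR_COEF = 10000000): with wsum = 10000 the per-unit credit is
 * 10000000 / (8 * 10000) = 125, so a VN with vn_min_rate = 1000 gets
 * vn_credit_delta = max(125000, 2 * fair_threshold).
 */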
2238
2239 /* This function is called upon link interrupt */
2240 static void bnx2x_link_attn(struct bnx2x *bp)
2241 {
2242         int vn;
2243
2244         /* Make sure that we are synced with the current statistics */
2245         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2246
2247         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2248
2249         if (bp->link_vars.link_up) {
2250
2251                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2252                         struct host_port_stats *pstats;
2253
2254                         pstats = bnx2x_sp(bp, port_stats);
2255                         /* reset old bmac stats */
2256                         memset(&(pstats->mac_stx[0]), 0,
2257                                sizeof(struct mac_stx));
2258                 }
2259                 if ((bp->state == BNX2X_STATE_OPEN) ||
2260                     (bp->state == BNX2X_STATE_DISABLED))
2261                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2262         }
2263
2264         /* indicate link status */
2265         bnx2x_link_report(bp);
2266
2267         if (IS_E1HMF(bp)) {
2268                 int func;
2269
2270                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2271                         if (vn == BP_E1HVN(bp))
2272                                 continue;
2273
2274                         func = ((vn << 1) | BP_PORT(bp));
2275
2276                         /* Set the attention towards other drivers
2277                            on the same port */
2278                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2279                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2280                 }
2281         }
2282
2283         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2284                 struct cmng_struct_per_port m_cmng_port;
2285                 u32 wsum;
2286                 int port = BP_PORT(bp);
2287
2288                 /* Init RATE SHAPING and FAIRNESS contexts */
2289                 wsum = bnx2x_calc_vn_wsum(bp);
2290                 bnx2x_init_port_minmax(bp, (int)wsum,
2291                                         bp->link_vars.line_speed,
2292                                         &m_cmng_port);
2293                 if (IS_E1HMF(bp))
2294                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2295                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2296                                         wsum, bp->link_vars.line_speed,
2297                                                      &m_cmng_port);
2298         }
2299 }
2300
2301 static void bnx2x__link_status_update(struct bnx2x *bp)
2302 {
2303         if (bp->state != BNX2X_STATE_OPEN)
2304                 return;
2305
2306         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2307
2308         if (bp->link_vars.link_up)
2309                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2310         else
2311                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2312
2313         /* indicate link status */
2314         bnx2x_link_report(bp);
2315 }
2316
2317 static void bnx2x_pmf_update(struct bnx2x *bp)
2318 {
2319         int port = BP_PORT(bp);
2320         u32 val;
2321
2322         bp->port.pmf = 1;
2323         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2324
2325         /* enable nig attention */
2326         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2327         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2328         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2329
2330         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2331 }
2332
2333 /* end of Link */
2334
2335 /* slow path */
2336
2337 /*
2338  * General service functions
2339  */
2340
2341 /* the slow path queue is unusual in that completions arrive on the fastpath ring */
2342 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2343                          u32 data_hi, u32 data_lo, int common)
2344 {
2345         int func = BP_FUNC(bp);
2346
2347         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2348            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2349            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2350            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2351            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2352
2353 #ifdef BNX2X_STOP_ON_ERROR
2354         if (unlikely(bp->panic))
2355                 return -EIO;
2356 #endif
2357
2358         spin_lock_bh(&bp->spq_lock);
2359
2360         if (!bp->spq_left) {
2361                 BNX2X_ERR("BUG! SPQ ring full!\n");
2362                 spin_unlock_bh(&bp->spq_lock);
2363                 bnx2x_panic();
2364                 return -EBUSY;
2365         }
2366
2367         /* CID needs the port number to be encoded in it */
2368         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2369                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2370                                      HW_CID(bp, cid)));
2371         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2372         if (common)
2373                 bp->spq_prod_bd->hdr.type |=
2374                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2375
2376         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2377         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2378
2379         bp->spq_left--;
2380
2381         if (bp->spq_prod_bd == bp->spq_last_bd) {
2382                 bp->spq_prod_bd = bp->spq;
2383                 bp->spq_prod_idx = 0;
2384                 DP(NETIF_MSG_TIMER, "end of spq\n");
2385
2386         } else {
2387                 bp->spq_prod_bd++;
2388                 bp->spq_prod_idx++;
2389         }
2390
2391         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2392                bp->spq_prod_idx);
2393
2394         spin_unlock_bh(&bp->spq_lock);
2395         return 0;
2396 }
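
/* Editor's note -- the producer wrap at the end of bnx2x_sp_post(),
 * restated as a generic ring pattern (sketch only, hypothetical
 * names):
 *
 *      if (prod_bd == last_bd) {       hit the end of the page
 *              prod_bd  = first_bd;    wrap to the start
 *              prod_idx = 0;
 *      } else {
 *              prod_bd++;
 *              prod_idx++;
 *      }
 */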
2397
2398 /* acquire split MCP access lock register */
2399 static int bnx2x_acquire_alr(struct bnx2x *bp)
2400 {
2401         u32 i, j, val;
2402         int rc = 0;
2403
2404         might_sleep();
2405         i = 100;
2406         for (j = 0; j < i*10; j++) {
2407                 val = (1UL << 31);
2408                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2410                 if (val & (1L << 31))
2411                         break;
2412
2413                 msleep(5);
2414         }
2415         if (!(val & (1L << 31))) {
2416                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2417                 rc = -EBUSY;
2418         }
2419
2420         return rc;
2421 }
2422
2423 /* release split MCP access lock register */
2424 static void bnx2x_release_alr(struct bnx2x *bp)
2425 {
2426         u32 val = 0;
2427
2428         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2429 }
2430
2431 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2432 {
2433         struct host_def_status_block *def_sb = bp->def_status_blk;
2434         u16 rc = 0;
2435
2436         barrier(); /* status block is written to by the chip */
2437         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2438                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2439                 rc |= 1;
2440         }
2441         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2442                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2443                 rc |= 2;
2444         }
2445         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2446                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2447                 rc |= 4;
2448         }
2449         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2450                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2451                 rc |= 8;
2452         }
2453         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2454                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2455                 rc |= 16;
2456         }
2457         return rc;
2458 }
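
/* Editor's note: the return value above is a bitmask of which indices
 * advanced -- bit 0 attention, bit 1 CSTORM, bit 2 USTORM, bit 3
 * XSTORM, bit 4 TSTORM -- letting the caller service only the status
 * blocks that actually changed.
 */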
2459
2460 /*
2461  * slow path service functions
2462  */
2463
2464 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2465 {
2466         int port = BP_PORT(bp);
2467         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2468                        COMMAND_REG_ATTN_BITS_SET);
2469         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2470                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2471         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2472                                        NIG_REG_MASK_INTERRUPT_PORT0;
2473         u32 aeu_mask;
2474
2475         if (bp->attn_state & asserted)
2476                 BNX2X_ERR("IGU ERROR\n");
2477
2478         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2479         aeu_mask = REG_RD(bp, aeu_addr);
2480
2481         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2482            aeu_mask, asserted);
2483         aeu_mask &= ~(asserted & 0xff);
2484         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2485
2486         REG_WR(bp, aeu_addr, aeu_mask);
2487         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2488
2489         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2490         bp->attn_state |= asserted;
2491         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2492
2493         if (asserted & ATTN_HARD_WIRED_MASK) {
2494                 if (asserted & ATTN_NIG_FOR_FUNC) {
2495
2496                         bnx2x_acquire_phy_lock(bp);
2497
2498                         /* save nig interrupt mask */
2499                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2500                         REG_WR(bp, nig_int_mask_addr, 0);
2501
2502                         bnx2x_link_attn(bp);
2503
2504                         /* handle unicore attn? */
2505                 }
2506                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2507                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2508
2509                 if (asserted & GPIO_2_FUNC)
2510                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2511
2512                 if (asserted & GPIO_3_FUNC)
2513                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2514
2515                 if (asserted & GPIO_4_FUNC)
2516                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2517
2518                 if (port == 0) {
2519                         if (asserted & ATTN_GENERAL_ATTN_1) {
2520                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2521                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2522                         }
2523                         if (asserted & ATTN_GENERAL_ATTN_2) {
2524                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2525                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2526                         }
2527                         if (asserted & ATTN_GENERAL_ATTN_3) {
2528                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2529                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2530                         }
2531                 } else {
2532                         if (asserted & ATTN_GENERAL_ATTN_4) {
2533                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2534                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2535                         }
2536                         if (asserted & ATTN_GENERAL_ATTN_5) {
2537                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2538                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2539                         }
2540                         if (asserted & ATTN_GENERAL_ATTN_6) {
2541                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2542                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2543                         }
2544                 }
2545
2546         } /* if hardwired */
2547
2548         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2549            asserted, hc_addr);
2550         REG_WR(bp, hc_addr, asserted);
2551
2552         /* now set back the mask */
2553         if (asserted & ATTN_NIG_FOR_FUNC) {
2554                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2555                 bnx2x_release_phy_lock(bp);
2556         }
2557 }
2558
2559 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2560 {
2561         int port = BP_PORT(bp);
2562         int reg_offset;
2563         u32 val;
2564
2565         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2566                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2567
2568         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2569
2570                 val = REG_RD(bp, reg_offset);
2571                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2572                 REG_WR(bp, reg_offset, val);
2573
2574                 BNX2X_ERR("SPIO5 hw attention\n");
2575
2576                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2577                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2578                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2579                         /* Fan failure attention */
2580
2581                         /* The PHY reset is controlled by GPIO 1 */
2582                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2583                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2584                         /* Low power mode is controlled by GPIO 2 */
2585                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2586                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2587                         /* mark the failure */
2588                         bp->link_params.ext_phy_config &=
2589                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2590                         bp->link_params.ext_phy_config |=
2591                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2592                         SHMEM_WR(bp,
2593                                  dev_info.port_hw_config[port].
2594                                                         external_phy_config,
2595                                  bp->link_params.ext_phy_config);
2596                         /* log the failure */
2597                         printk(KERN_ERR PFX "Fan Failure on Network"
2598                                " Controller %s has caused the driver to"
2599                                " shutdown the card to prevent permanent"
2600                                " damage.  Please contact Dell Support for"
2601                                " assistance\n", bp->dev->name);
2602                         break;
2603
2604                 default:
2605                         break;
2606                 }
2607         }
2608
2609         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2610
2611                 val = REG_RD(bp, reg_offset);
2612                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2613                 REG_WR(bp, reg_offset, val);
2614
2615                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2616                           (attn & HW_INTERRUT_ASSERT_SET_0));
2617                 bnx2x_panic();
2618         }
2619 }
2620
2621 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2622 {
2623         u32 val;
2624
2625         if (attn & BNX2X_DOORQ_ASSERT) {
2626
2627                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2628                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2629                 /* DORQ discard attention */
2630                 if (val & 0x2)
2631                         BNX2X_ERR("FATAL error from DORQ\n");
2632         }
2633
2634         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2635
2636                 int port = BP_PORT(bp);
2637                 int reg_offset;
2638
2639                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2640                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2641
2642                 val = REG_RD(bp, reg_offset);
2643                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2644                 REG_WR(bp, reg_offset, val);
2645
2646                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2647                           (attn & HW_INTERRUT_ASSERT_SET_1));
2648                 bnx2x_panic();
2649         }
2650 }
2651
2652 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2653 {
2654         u32 val;
2655
2656         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2657
2658                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2659                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2660                 /* CFC error attention */
2661                 if (val & 0x2)
2662                         BNX2X_ERR("FATAL error from CFC\n");
2663         }
2664
2665         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2666
2667                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2668                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2669                 /* RQ_USDMDP_FIFO_OVERFLOW */
2670                 if (val & 0x18000)
2671                         BNX2X_ERR("FATAL error from PXP\n");
2672         }
2673
2674         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2675
2676                 int port = BP_PORT(bp);
2677                 int reg_offset;
2678
2679                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2680                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2681
2682                 val = REG_RD(bp, reg_offset);
2683                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2684                 REG_WR(bp, reg_offset, val);
2685
2686                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2687                           (attn & HW_INTERRUT_ASSERT_SET_2));
2688                 bnx2x_panic();
2689         }
2690 }
2691
2692 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2693 {
2694         u32 val;
2695
2696         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2697
2698                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2699                         int func = BP_FUNC(bp);
2700
2701                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2702                         bnx2x__link_status_update(bp);
2703                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2704                                                         DRV_STATUS_PMF)
2705                                 bnx2x_pmf_update(bp);
2706
2707                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2708
2709                         BNX2X_ERR("MC assert!\n");
2710                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2711                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2712                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2713                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2714                         bnx2x_panic();
2715
2716                 } else if (attn & BNX2X_MCP_ASSERT) {
2717
2718                         BNX2X_ERR("MCP assert!\n");
2719                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2720                         bnx2x_fw_dump(bp);
2721
2722                 } else
2723                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2724         }
2725
2726         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2727                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2728                 if (attn & BNX2X_GRC_TIMEOUT) {
2729                         val = CHIP_IS_E1H(bp) ?
2730                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2731                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2732                 }
2733                 if (attn & BNX2X_GRC_RSV) {
2734                         val = CHIP_IS_E1H(bp) ?
2735                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2736                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2737                 }
2738                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2739         }
2740 }
2741
2742 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2743 {
2744         struct attn_route attn;
2745         struct attn_route group_mask;
2746         int port = BP_PORT(bp);
2747         int index;
2748         u32 reg_addr;
2749         u32 val;
2750         u32 aeu_mask;
2751
2752         /* need to take the HW lock because the MCP or the other port might
2753            also try to handle this event */
2754         bnx2x_acquire_alr(bp);
2755
2756         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2757         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2758         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2759         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2760         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2761            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2762
2763         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2764                 if (deasserted & (1 << index)) {
2765                         group_mask = bp->attn_group[index];
2766
2767                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2768                            index, group_mask.sig[0], group_mask.sig[1],
2769                            group_mask.sig[2], group_mask.sig[3]);
2770
2771                         bnx2x_attn_int_deasserted3(bp,
2772                                         attn.sig[3] & group_mask.sig[3]);
2773                         bnx2x_attn_int_deasserted1(bp,
2774                                         attn.sig[1] & group_mask.sig[1]);
2775                         bnx2x_attn_int_deasserted2(bp,
2776                                         attn.sig[2] & group_mask.sig[2]);
2777                         bnx2x_attn_int_deasserted0(bp,
2778                                         attn.sig[0] & group_mask.sig[0]);
2779
2780                         if ((attn.sig[0] & group_mask.sig[0] &
2781                                                 HW_PRTY_ASSERT_SET_0) ||
2782                             (attn.sig[1] & group_mask.sig[1] &
2783                                                 HW_PRTY_ASSERT_SET_1) ||
2784                             (attn.sig[2] & group_mask.sig[2] &
2785                                                 HW_PRTY_ASSERT_SET_2))
2786                                 BNX2X_ERR("FATAL HW block parity attention\n");
2787                 }
2788         }
2789
2790         bnx2x_release_alr(bp);
2791
2792         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2793
2794         val = ~deasserted;
2795         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2796            val, reg_addr);
2797         REG_WR(bp, reg_addr, val);
2798
2799         if (~bp->attn_state & deasserted)
2800                 BNX2X_ERR("IGU ERROR\n");
2801
2802         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2803                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2804
2805         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2806         aeu_mask = REG_RD(bp, reg_addr);
2807
2808         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2809            aeu_mask, deasserted);
2810         aeu_mask |= (deasserted & 0xff);
2811         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2812
2813         REG_WR(bp, reg_addr, aeu_mask);
2814         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2815
2816         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2817         bp->attn_state &= ~deasserted;
2818         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2819 }
2820
2821 static void bnx2x_attn_int(struct bnx2x *bp)
2822 {
2823         /* read local copy of bits */
2824         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2825                                                                 attn_bits);
2826         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2827                                                                 attn_bits_ack);
2828         u32 attn_state = bp->attn_state;
2829
2830         /* look for changed bits */
2831         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2832         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2833
2834         DP(NETIF_MSG_HW,
2835            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2836            attn_bits, attn_ack, asserted, deasserted);
2837
2838         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2839                 BNX2X_ERR("BAD attention state\n");
2840
2841         /* handle bits that were raised */
2842         if (asserted)
2843                 bnx2x_attn_int_asserted(bp, asserted);
2844
2845         if (deasserted)
2846                 bnx2x_attn_int_deasserted(bp, deasserted);
2847 }
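/*
 * Illustrative sketch, not driver code: the asserted/deasserted
 * computation above is a per-bit edge detector over three snapshots.
 * A bit is newly asserted when the hardware reports it (attn_bits)
 * while it is neither acknowledged (attn_ack) nor already tracked
 * (attn_state); deasserted is the exact mirror image.
 */
#if 0
	u32 attn_bits  = 0x5;	/* HW currently raises bits 0 and 2 */
	u32 attn_ack   = 0x6;	/* bits 1 and 2 were acknowledged */
	u32 attn_state = 0x6;	/* driver tracks bits 1 and 2 as set */

	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;	/* 0x1 */
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;	/* 0x2 */
#endif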
2848
2849 static void bnx2x_sp_task(struct work_struct *work)
2850 {
2851         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2852         u16 status;
2853
2854
2855         /* Return here if interrupt is disabled */
2856         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2857                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2858                 return;
2859         }
2860
2861         status = bnx2x_update_dsb_idx(bp);
2862 /*      if (status == 0)                                     */
2863 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2864
2865         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2866
2867         /* HW attentions */
2868         if (status & 0x1)
2869                 bnx2x_attn_int(bp);
2870
2871         /* CStorm events: query_stats, port delete ramrod */
2872         if (status & 0x2)
2873                 bp->stats_pending = 0;
2874
2875         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2876                      IGU_INT_NOP, 1);
2877         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2878                      IGU_INT_NOP, 1);
2879         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2880                      IGU_INT_NOP, 1);
2881         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2882                      IGU_INT_NOP, 1);
2883         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2884                      IGU_INT_ENABLE, 1);
2885
2886 }
2887
2888 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2889 {
2890         struct net_device *dev = dev_instance;
2891         struct bnx2x *bp = netdev_priv(dev);
2892
2893         /* Return here if interrupt is disabled */
2894         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2895                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2896                 return IRQ_HANDLED;
2897         }
2898
2899         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2900
2901 #ifdef BNX2X_STOP_ON_ERROR
2902         if (unlikely(bp->panic))
2903                 return IRQ_HANDLED;
2904 #endif
2905
2906         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2907
2908         return IRQ_HANDLED;
2909 }
2910
2911 /* end of slow path */
2912
2913 /* Statistics */
2914
2915 /****************************************************************************
2916 * Macros
2917 ****************************************************************************/
2918
2919 /* sum[hi:lo] += add[hi:lo] */
2920 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2921         do { \
2922                 s_lo += a_lo; \
2923                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2924         } while (0)
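/*
 * Minimal sketch (illustrative values only): the carry out of the low
 * word is detected by the unsigned wrap-around test - after s_lo has
 * been updated, s_lo < a_lo holds exactly when the addition wrapped.
 */
#if 0
	u32 s_hi = 0, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 1);
	/* s_lo wrapped to 0, so 0 < 1 supplies the carry: s_hi == 1 */
#endif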
2925
2926 /* difference = minuend - subtrahend */
2927 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2928         do { \
2929                 if (m_lo < s_lo) { \
2930                         /* underflow */ \
2931                         d_hi = m_hi - s_hi; \
2932                         if (d_hi > 0) { \
2933                                 /* we can borrow 1 */ \
2934                                 d_hi--; \
2935                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2936                         } else { \
2937                                 /* m_hi <= s_hi */ \
2938                                 d_hi = 0; \
2939                                 d_lo = 0; \
2940                         } \
2941                 } else { \
2942                         /* m_lo >= s_lo */ \
2943                         if (m_hi < s_hi) { \
2944                                 d_hi = 0; \
2945                                 d_lo = 0; \
2946                         } else { \
2947                                 /* m_hi >= s_hi */ \
2948                                 d_hi = m_hi - s_hi; \
2949                                 d_lo = m_lo - s_lo; \
2950                         } \
2951                 } \
2952         } while (0)
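/*
 * Sketch of the borrow path above (illustrative values only):
 * 0x2:00000000 - 0x1:ffffffff should give 0x0:00000001.
 */
#if 0
	u32 d_hi, d_lo;

	DIFF_64(d_hi, 0x2, 0x1, d_lo, 0x0, 0xffffffff);
	/* m_lo < s_lo, so 1 is borrowed from the high word:
	 * d_hi = 2 - 1 - 1 = 0, d_lo = 0 + (UINT_MAX - 0xffffffff) + 1 = 1
	 */
#endif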
2953
2954 #define UPDATE_STAT64(s, t) \
2955         do { \
2956                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2957                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2958                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2959                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2960                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2961                        pstats->mac_stx[1].t##_lo, diff.lo); \
2962         } while (0)
2963
2964 #define UPDATE_STAT64_NIG(s, t) \
2965         do { \
2966                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2967                         diff.lo, new->s##_lo, old->s##_lo); \
2968                 ADD_64(estats->t##_hi, diff.hi, \
2969                        estats->t##_lo, diff.lo); \
2970         } while (0)
2971
2972 /* sum[hi:lo] += add */
2973 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2974         do { \
2975                 s_lo += a; \
2976                 s_hi += (s_lo < a) ? 1 : 0; \
2977         } while (0)
2978
2979 #define UPDATE_EXTEND_STAT(s) \
2980         do { \
2981                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2982                               pstats->mac_stx[1].s##_lo, \
2983                               new->s); \
2984         } while (0)
2985
2986 #define UPDATE_EXTEND_TSTAT(s, t) \
2987         do { \
2988                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2989                 old_tclient->s = le32_to_cpu(tclient->s); \
2990                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2991         } while (0)
2992
2993 #define UPDATE_EXTEND_XSTAT(s, t) \
2994         do { \
2995                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2996                 old_xclient->s = le32_to_cpu(xclient->s); \
2997                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2998         } while (0)
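/*
 * Note on the two UPDATE_EXTEND_* macros above: the delta is taken in
 * plain u32 arithmetic, so a storm counter that wrapped since the
 * previous poll still produces the correct difference as long as it
 * advanced by less than 2^32.  Sketch with illustrative values:
 */
#if 0
	u32 old = 0xfffffff0, now = 0x00000010;
	u32 diff = now - old;	/* 0x20, correct across the wrap */
#endif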
2999
3000 /*
3001  * General service functions
3002  */
3003
3004 static inline long bnx2x_hilo(u32 *hiref)
3005 {
3006         u32 lo = *(hiref + 1);
3007 #if (BITS_PER_LONG == 64)
3008         u32 hi = *hiref;
3009
3010         return HILO_U64(hi, lo);
3011 #else
3012         return lo;
3013 #endif
3014 }
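/*
 * Sketch, assuming the usual HILO_U64(hi, lo) helper from bnx2x.h
 * builds (((u64)hi << 32) + lo): the hi/lo pair is stored hi-first,
 * and only 64-bit kernels can return the full counter in a long.
 */
#if 0
	u32 pair[2] = { 0x1, 0x2 };	/* { hi, lo } as stored */
	long v = bnx2x_hilo(pair);	/* 0x100000002 on 64-bit, 0x2 on 32-bit */
#endif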
3015
3016 /*
3017  * Init service functions
3018  */
3019
3020 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3021 {
3022         if (!bp->stats_pending) {
3023                 struct eth_query_ramrod_data ramrod_data = {0};
3024                 int rc;
3025
3026                 ramrod_data.drv_counter = bp->stats_counter++;
3027                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3028                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3029
3030                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3031                                    ((u32 *)&ramrod_data)[1],
3032                                    ((u32 *)&ramrod_data)[0], 0);
3033                 if (rc == 0) {
3034                         /* stats ramrod has its own slot on the spq */
3035                         bp->spq_left++;
3036                         bp->stats_pending = 1;
3037                 }
3038         }
3039 }
3040
3041 static void bnx2x_stats_init(struct bnx2x *bp)
3042 {
3043         int port = BP_PORT(bp);
3044
3045         bp->executer_idx = 0;
3046         bp->stats_counter = 0;
3047
3048         /* port stats */
3049         if (!BP_NOMCP(bp))
3050                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3051         else
3052                 bp->port.port_stx = 0;
3053         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3054
3055         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3056         bp->port.old_nig_stats.brb_discard =
3057                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3058         bp->port.old_nig_stats.brb_truncate =
3059                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3060         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3061                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3062         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3063                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3064
3065         /* function stats */
3066         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3067         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3068         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3069         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3070
3071         bp->stats_state = STATS_STATE_DISABLED;
3072         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3073                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3074 }
3075
3076 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3077 {
3078         struct dmae_command *dmae = &bp->stats_dmae;
3079         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3080
3081         *stats_comp = DMAE_COMP_VAL;
3082
3083         /* loader */
3084         if (bp->executer_idx) {
3085                 int loader_idx = PMF_DMAE_C(bp);
3086
3087                 memset(dmae, 0, sizeof(struct dmae_command));
3088
3089                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3090                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3091                                 DMAE_CMD_DST_RESET |
3092 #ifdef __BIG_ENDIAN
3093                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3094 #else
3095                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3096 #endif
3097                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3098                                                DMAE_CMD_PORT_0) |
3099                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3100                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3101                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3102                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3103                                      sizeof(struct dmae_command) *
3104                                      (loader_idx + 1)) >> 2;
3105                 dmae->dst_addr_hi = 0;
3106                 dmae->len = sizeof(struct dmae_command) >> 2;
3107                 if (CHIP_IS_E1(bp))
3108                         dmae->len--;
3109                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3110                 dmae->comp_addr_hi = 0;
3111                 dmae->comp_val = 1;
3112
3113                 *stats_comp = 0;
3114                 bnx2x_post_dmae(bp, dmae, loader_idx);
3115
3116         } else if (bp->func_stx) {
3117                 *stats_comp = 0;
3118                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3119         }
3120 }
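/*
 * Reading the loader setup above: DMAE command slots live in GRC at
 * DMAE_REG_CMD_MEM, sizeof(struct dmae_command) bytes apart, and DMAE
 * addresses are expressed in 32-bit words - hence the >> 2.  Sketch:
 */
#if 0
	u32 slot = DMAE_REG_CMD_MEM +
		   sizeof(struct dmae_command) * (loader_idx + 1);

	dmae->dst_addr_lo = slot >> 2;	/* byte address -> dword address */
#endif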
3121
3122 static int bnx2x_stats_comp(struct bnx2x *bp)
3123 {
3124         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3125         int cnt = 10;
3126
3127         might_sleep();
3128         while (*stats_comp != DMAE_COMP_VAL) {
3129                 if (!cnt) {
3130                         BNX2X_ERR("timed out waiting for stats to finish\n");
3131                         break;
3132                 }
3133                 cnt--;
3134                 msleep(1);
3135         }
3136         return 1;
3137 }
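/*
 * The wait above is the usual bounded-poll pattern: sleep in 1 ms
 * slices and give up after ~10 ms instead of spinning forever on a
 * wedged DMAE engine.  Standalone sketch (dmae_done() is a
 * hypothetical stand-in for the *stats_comp test):
 */
#if 0
	int cnt = 10;

	while (!dmae_done()) {
		if (!cnt) {
			/* timed out - report and fall through */
			break;
		}
		cnt--;
		msleep(1);
	}
#endif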
3138
3139 /*
3140  * Statistics service functions
3141  */
3142
3143 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3144 {
3145         struct dmae_command *dmae;
3146         u32 opcode;
3147         int loader_idx = PMF_DMAE_C(bp);
3148         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3149
3150         /* sanity */
3151         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3152                 BNX2X_ERR("BUG!\n");
3153                 return;
3154         }
3155
3156         bp->executer_idx = 0;
3157
3158         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3159                   DMAE_CMD_C_ENABLE |
3160                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3161 #ifdef __BIG_ENDIAN
3162                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3163 #else
3164                   DMAE_CMD_ENDIANITY_DW_SWAP |
3165 #endif
3166                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3167                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3168
3169         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3171         dmae->src_addr_lo = bp->port.port_stx >> 2;
3172         dmae->src_addr_hi = 0;
3173         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3174         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3175         dmae->len = DMAE_LEN32_RD_MAX;
3176         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3177         dmae->comp_addr_hi = 0;
3178         dmae->comp_val = 1;
3179
3180         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3182         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3183         dmae->src_addr_hi = 0;
3184         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3185                                    DMAE_LEN32_RD_MAX * 4);
3186         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3187                                    DMAE_LEN32_RD_MAX * 4);
3188         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3189         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3190         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3191         dmae->comp_val = DMAE_COMP_VAL;
3192
3193         *stats_comp = 0;
3194         bnx2x_hw_stats_post(bp);
3195         bnx2x_stats_comp(bp);
3196 }
3197
3198 static void bnx2x_port_stats_init(struct bnx2x *bp)
3199 {
3200         struct dmae_command *dmae;
3201         int port = BP_PORT(bp);
3202         int vn = BP_E1HVN(bp);
3203         u32 opcode;
3204         int loader_idx = PMF_DMAE_C(bp);
3205         u32 mac_addr;
3206         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3207
3208         /* sanity */
3209         if (!bp->link_vars.link_up || !bp->port.pmf) {
3210                 BNX2X_ERR("BUG!\n");
3211                 return;
3212         }
3213
3214         bp->executer_idx = 0;
3215
3216         /* MCP */
3217         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3218                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3219                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3220 #ifdef __BIG_ENDIAN
3221                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3222 #else
3223                   DMAE_CMD_ENDIANITY_DW_SWAP |
3224 #endif
3225                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3226                   (vn << DMAE_CMD_E1HVN_SHIFT));
3227
3228         if (bp->port.port_stx) {
3229
3230                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231                 dmae->opcode = opcode;
3232                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3233                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3234                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3235                 dmae->dst_addr_hi = 0;
3236                 dmae->len = sizeof(struct host_port_stats) >> 2;
3237                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3238                 dmae->comp_addr_hi = 0;
3239                 dmae->comp_val = 1;
3240         }
3241
3242         if (bp->func_stx) {
3243
3244                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3245                 dmae->opcode = opcode;
3246                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3247                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3248                 dmae->dst_addr_lo = bp->func_stx >> 2;
3249                 dmae->dst_addr_hi = 0;
3250                 dmae->len = sizeof(struct host_func_stats) >> 2;
3251                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252                 dmae->comp_addr_hi = 0;
3253                 dmae->comp_val = 1;
3254         }
3255
3256         /* MAC */
3257         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3258                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3259                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3260 #ifdef __BIG_ENDIAN
3261                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3262 #else
3263                   DMAE_CMD_ENDIANITY_DW_SWAP |
3264 #endif
3265                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3266                   (vn << DMAE_CMD_E1HVN_SHIFT));
3267
3268         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3269
3270                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3271                                    NIG_REG_INGRESS_BMAC0_MEM);
3272
3273                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3274                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3275                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3276                 dmae->opcode = opcode;
3277                 dmae->src_addr_lo = (mac_addr +
3278                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3279                 dmae->src_addr_hi = 0;
3280                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3281                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3282                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3283                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3284                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285                 dmae->comp_addr_hi = 0;
3286                 dmae->comp_val = 1;
3287
3288                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3289                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3290                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291                 dmae->opcode = opcode;
3292                 dmae->src_addr_lo = (mac_addr +
3293                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3294                 dmae->src_addr_hi = 0;
3295                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3296                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3297                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3298                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3299                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3300                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3301                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302                 dmae->comp_addr_hi = 0;
3303                 dmae->comp_val = 1;
3304
3305         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3306
3307                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3308
3309                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3310                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3311                 dmae->opcode = opcode;
3312                 dmae->src_addr_lo = (mac_addr +
3313                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3314                 dmae->src_addr_hi = 0;
3315                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3316                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3317                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3318                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3319                 dmae->comp_addr_hi = 0;
3320                 dmae->comp_val = 1;
3321
3322                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3323                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324                 dmae->opcode = opcode;
3325                 dmae->src_addr_lo = (mac_addr +
3326                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3327                 dmae->src_addr_hi = 0;
3328                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3329                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3330                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3331                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3332                 dmae->len = 1;
3333                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334                 dmae->comp_addr_hi = 0;
3335                 dmae->comp_val = 1;
3336
3337                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3338                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339                 dmae->opcode = opcode;
3340                 dmae->src_addr_lo = (mac_addr +
3341                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3342                 dmae->src_addr_hi = 0;
3343                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3344                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3345                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3346                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3347                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3348                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3349                 dmae->comp_addr_hi = 0;
3350                 dmae->comp_val = 1;
3351         }
3352
3353         /* NIG */
3354         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355         dmae->opcode = opcode;
3356         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3357                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3358         dmae->src_addr_hi = 0;
3359         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3360         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3361         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3362         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363         dmae->comp_addr_hi = 0;
3364         dmae->comp_val = 1;
3365
3366         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367         dmae->opcode = opcode;
3368         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3369                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3370         dmae->src_addr_hi = 0;
3371         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3372                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3373         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3374                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3375         dmae->len = (2*sizeof(u32)) >> 2;
3376         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377         dmae->comp_addr_hi = 0;
3378         dmae->comp_val = 1;
3379
3380         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3381         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3382                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3383                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3384 #ifdef __BIG_ENDIAN
3385                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3386 #else
3387                         DMAE_CMD_ENDIANITY_DW_SWAP |
3388 #endif
3389                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3390                         (vn << DMAE_CMD_E1HVN_SHIFT));
3391         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3392                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3393         dmae->src_addr_hi = 0;
3394         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3395                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3396         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3397                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3398         dmae->len = (2*sizeof(u32)) >> 2;
3399         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401         dmae->comp_val = DMAE_COMP_VAL;
3402
3403         *stats_comp = 0;
3404 }
3405
3406 static void bnx2x_func_stats_init(struct bnx2x *bp)
3407 {
3408         struct dmae_command *dmae = &bp->stats_dmae;
3409         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3410
3411         /* sanity */
3412         if (!bp->func_stx) {
3413                 BNX2X_ERR("BUG!\n");
3414                 return;
3415         }
3416
3417         bp->executer_idx = 0;
3418         memset(dmae, 0, sizeof(struct dmae_command));
3419
3420         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3422                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3423 #ifdef __BIG_ENDIAN
3424                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425 #else
3426                         DMAE_CMD_ENDIANITY_DW_SWAP |
3427 #endif
3428                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3429                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3430         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3431         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3432         dmae->dst_addr_lo = bp->func_stx >> 2;
3433         dmae->dst_addr_hi = 0;
3434         dmae->len = sizeof(struct host_func_stats) >> 2;
3435         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3436         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3437         dmae->comp_val = DMAE_COMP_VAL;
3438
3439         *stats_comp = 0;
3440 }
3441
3442 static void bnx2x_stats_start(struct bnx2x *bp)
3443 {
3444         if (bp->port.pmf)
3445                 bnx2x_port_stats_init(bp);
3446
3447         else if (bp->func_stx)
3448                 bnx2x_func_stats_init(bp);
3449
3450         bnx2x_hw_stats_post(bp);
3451         bnx2x_storm_stats_post(bp);
3452 }
3453
3454 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3455 {
3456         bnx2x_stats_comp(bp);
3457         bnx2x_stats_pmf_update(bp);
3458         bnx2x_stats_start(bp);
3459 }
3460
3461 static void bnx2x_stats_restart(struct bnx2x *bp)
3462 {
3463         bnx2x_stats_comp(bp);
3464         bnx2x_stats_start(bp);
3465 }
3466
3467 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3468 {
3469         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3470         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3471         struct regpair diff;
3472
3473         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3474         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3475         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3476         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3477         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3478         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3479         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3480         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3481         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3482         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3483         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3484         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3485         UPDATE_STAT64(tx_stat_gt127,
3486                                 tx_stat_etherstatspkts65octetsto127octets);
3487         UPDATE_STAT64(tx_stat_gt255,
3488                                 tx_stat_etherstatspkts128octetsto255octets);
3489         UPDATE_STAT64(tx_stat_gt511,
3490                                 tx_stat_etherstatspkts256octetsto511octets);
3491         UPDATE_STAT64(tx_stat_gt1023,
3492                                 tx_stat_etherstatspkts512octetsto1023octets);
3493         UPDATE_STAT64(tx_stat_gt1518,
3494                                 tx_stat_etherstatspkts1024octetsto1522octets);
3495         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3496         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3497         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3498         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3499         UPDATE_STAT64(tx_stat_gterr,
3500                                 tx_stat_dot3statsinternalmactransmiterrors);
3501         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3502 }
3503
3504 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3505 {
3506         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3507         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3508
3509         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3510         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3511         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3512         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3513         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3514         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3515         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3516         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3517         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3518         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3519         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3520         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3521         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3522         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3523         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3524         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3525         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3526         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3527         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3528         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3529         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3530         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3531         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3532         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3533         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3534         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3535         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3536         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3537         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3538         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3539         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3540 }
3541
3542 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3543 {
3544         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3545         struct nig_stats *old = &(bp->port.old_nig_stats);
3546         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548         struct regpair diff;
3549
3550         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3551                 bnx2x_bmac_stats_update(bp);
3552
3553         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3554                 bnx2x_emac_stats_update(bp);
3555
3556         else { /* unreached */
3557                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3558                 return -1;
3559         }
3560
3561         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3562                       new->brb_discard - old->brb_discard);
3563         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3564                       new->brb_truncate - old->brb_truncate);
3565
3566         UPDATE_STAT64_NIG(egress_mac_pkt0,
3567                                         etherstatspkts1024octetsto1522octets);
3568         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3569
3570         memcpy(old, new, sizeof(struct nig_stats));
3571
3572         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3573                sizeof(struct mac_stx));
3574         estats->brb_drop_hi = pstats->brb_drop_hi;
3575         estats->brb_drop_lo = pstats->brb_drop_lo;
3576
3577         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3578
3579         return 0;
3580 }
3581
3582 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3583 {
3584         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3585         int cl_id = BP_CL_ID(bp);
3586         struct tstorm_per_port_stats *tport =
3587                                 &stats->tstorm_common.port_statistics;
3588         struct tstorm_per_client_stats *tclient =
3589                         &stats->tstorm_common.client_statistics[cl_id];
3590         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3591         struct xstorm_per_client_stats *xclient =
3592                         &stats->xstorm_common.client_statistics[cl_id];
3593         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3594         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3595         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3596         u32 diff;
3597
3598         /* are storm stats valid? */
3599         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3600                                                         bp->stats_counter) {
3601                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3602                    "  tstorm counter (%d) != stats_counter (%d)\n",
3603                    tclient->stats_counter, bp->stats_counter);
3604                 return -1;
3605         }
3606         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3607                                                         bp->stats_counter) {
3608                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3609                    "  xstorm counter (%d) != stats_counter (%d)\n",
3610                    xclient->stats_counter, bp->stats_counter);
3611                 return -2;
3612         }
3613
3614         fstats->total_bytes_received_hi =
3615         fstats->valid_bytes_received_hi =
3616                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3617         fstats->total_bytes_received_lo =
3618         fstats->valid_bytes_received_lo =
3619                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3620
3621         estats->error_bytes_received_hi =
3622                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3623         estats->error_bytes_received_lo =
3624                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3625         ADD_64(estats->error_bytes_received_hi,
3626                estats->rx_stat_ifhcinbadoctets_hi,
3627                estats->error_bytes_received_lo,
3628                estats->rx_stat_ifhcinbadoctets_lo);
3629
3630         ADD_64(fstats->total_bytes_received_hi,
3631                estats->error_bytes_received_hi,
3632                fstats->total_bytes_received_lo,
3633                estats->error_bytes_received_lo);
3634
3635         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3636         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3637                                 total_multicast_packets_received);
3638         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3639                                 total_broadcast_packets_received);
3640
3641         fstats->total_bytes_transmitted_hi =
3642                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3643         fstats->total_bytes_transmitted_lo =
3644                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3645
3646         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3647                                 total_unicast_packets_transmitted);
3648         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3649                                 total_multicast_packets_transmitted);
3650         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3651                                 total_broadcast_packets_transmitted);
3652
3653         memcpy(estats, &(fstats->total_bytes_received_hi),
3654                sizeof(struct host_func_stats) - 2*sizeof(u32));
3655
3656         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3657         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3658         estats->brb_truncate_discard =
3659                                 le32_to_cpu(tport->brb_truncate_discard);
3660         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3661
3662         old_tclient->rcv_unicast_bytes.hi =
3663                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3664         old_tclient->rcv_unicast_bytes.lo =
3665                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3666         old_tclient->rcv_broadcast_bytes.hi =
3667                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3668         old_tclient->rcv_broadcast_bytes.lo =
3669                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3670         old_tclient->rcv_multicast_bytes.hi =
3671                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3672         old_tclient->rcv_multicast_bytes.lo =
3673                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3674         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3675
3676         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3677         old_tclient->packets_too_big_discard =
3678                                 le32_to_cpu(tclient->packets_too_big_discard);
3679         estats->no_buff_discard =
3680         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3681         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3682
3683         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3684         old_xclient->unicast_bytes_sent.hi =
3685                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3686         old_xclient->unicast_bytes_sent.lo =
3687                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3688         old_xclient->multicast_bytes_sent.hi =
3689                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3690         old_xclient->multicast_bytes_sent.lo =
3691                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3692         old_xclient->broadcast_bytes_sent.hi =
3693                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3694         old_xclient->broadcast_bytes_sent.lo =
3695                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3696
3697         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3698
3699         return 0;
3700 }
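/*
 * Sketch of the validity test at the top of this function: each
 * STAT_QUERY ramrod is stamped with the pre-increment value of
 * bp->stats_counter and the storms echo that stamp back, so a reply
 * is current exactly when the echoed value is one behind the
 * driver's counter (illustrative values):
 */
#if 0
	u16 echoed = 7;		/* stamp echoed by the storm */
	u16 drv = 8;		/* bp->stats_counter after the post-increment */
	int current = ((u16)(echoed + 1) == drv);	/* 1: reply is current */
#endif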
3701
3702 static void bnx2x_net_stats_update(struct bnx2x *bp)
3703 {
3704         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3705         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3706         struct net_device_stats *nstats = &bp->dev->stats;
3707
3708         nstats->rx_packets =
3709                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3710                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3711                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3712
3713         nstats->tx_packets =
3714                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3715                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3716                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3717
3718         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3719
3720         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3721
3722         nstats->rx_dropped = old_tclient->checksum_discard +
3723                              estats->mac_discard;
3724         nstats->tx_dropped = 0;
3725
3726         nstats->multicast =
3727                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3728
3729         nstats->collisions =
3730                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3731                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3732                         estats->tx_stat_dot3statslatecollisions_lo +
3733                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3734
3735         estats->jabber_packets_received =
3736                                 old_tclient->packets_too_big_discard +
3737                                 estats->rx_stat_dot3statsframestoolong_lo;
3738
3739         nstats->rx_length_errors =
3740                                 estats->rx_stat_etherstatsundersizepkts_lo +
3741                                 estats->jabber_packets_received;
3742         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3743         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3744         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3745         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3746         nstats->rx_missed_errors = estats->xxoverflow_discard;
3747
3748         nstats->rx_errors = nstats->rx_length_errors +
3749                             nstats->rx_over_errors +
3750                             nstats->rx_crc_errors +
3751                             nstats->rx_frame_errors +
3752                             nstats->rx_fifo_errors +
3753                             nstats->rx_missed_errors;
3754
3755         nstats->tx_aborted_errors =
3756                         estats->tx_stat_dot3statslatecollisions_lo +
3757                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3758         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3759         nstats->tx_fifo_errors = 0;
3760         nstats->tx_heartbeat_errors = 0;
3761         nstats->tx_window_errors = 0;
3762
3763         nstats->tx_errors = nstats->tx_aborted_errors +
3764                             nstats->tx_carrier_errors;
3765 }
3766
3767 static void bnx2x_stats_update(struct bnx2x *bp)
3768 {
3769         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3770         int update = 0;
3771
3772         if (*stats_comp != DMAE_COMP_VAL)
3773                 return;
3774
3775         if (bp->port.pmf)
3776                 update = (bnx2x_hw_stats_update(bp) == 0);
3777
3778         update |= (bnx2x_storm_stats_update(bp) == 0);
3779
3780         if (update)
3781                 bnx2x_net_stats_update(bp);
3782
3783         else {
3784                 if (bp->stats_pending) {
3785                         bp->stats_pending++;
3786                         if (bp->stats_pending == 3) {
3787                                 BNX2X_ERR("stats were not updated for 3 times\n");
3788                                 bnx2x_panic();
3789                                 return;
3790                         }
3791                 }
3792         }
3793
3794         if (bp->msglevel & NETIF_MSG_TIMER) {
3795                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3796                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3797                 struct net_device_stats *nstats = &bp->dev->stats;
3798                 int i;
3799
3800                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3801                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3802                                   "  tx pkt (%lx)\n",
3803                        bnx2x_tx_avail(bp->fp),
3804                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3805                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3806                                   "  rx pkt (%lx)\n",
3807                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3808                              bp->fp->rx_comp_cons),
3809                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3810                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3811                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3812                        estats->driver_xoff, estats->brb_drop_lo);
3813                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3814                         "packets_too_big_discard %u  no_buff_discard %u  "
3815                         "mac_discard %u  mac_filter_discard %u  "
3816                         "xxovrflow_discard %u  brb_truncate_discard %u  "
3817                         "ttl0_discard %u\n",
3818                        old_tclient->checksum_discard,
3819                        old_tclient->packets_too_big_discard,
3820                        old_tclient->no_buff_discard, estats->mac_discard,
3821                        estats->mac_filter_discard, estats->xxoverflow_discard,
3822                        estats->brb_truncate_discard,
3823                        old_tclient->ttl0_discard);
3824
3825                 for_each_queue(bp, i) {
3826                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3827                                bnx2x_fp(bp, i, tx_pkt),
3828                                bnx2x_fp(bp, i, rx_pkt),
3829                                bnx2x_fp(bp, i, rx_calls));
3830                 }
3831         }
3832
3833         bnx2x_hw_stats_post(bp);
3834         bnx2x_storm_stats_post(bp);
3835 }
3836
3837 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3838 {
3839         struct dmae_command *dmae;
3840         u32 opcode;
3841         int loader_idx = PMF_DMAE_C(bp);
3842         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3843
3844         bp->executer_idx = 0;
3845
3846         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3847                   DMAE_CMD_C_ENABLE |
3848                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3849 #ifdef __BIG_ENDIAN
3850                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3851 #else
3852                   DMAE_CMD_ENDIANITY_DW_SWAP |
3853 #endif
3854                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3855                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3856
3857         if (bp->port.port_stx) {
3858
3859                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860                 if (bp->func_stx)
3861                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3862                 else
3863                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3864                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3865                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3866                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3867                 dmae->dst_addr_hi = 0;
3868                 dmae->len = sizeof(struct host_port_stats) >> 2;
3869                 if (bp->func_stx) {
3870                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3871                         dmae->comp_addr_hi = 0;
3872                         dmae->comp_val = 1;
3873                 } else {
3874                         dmae->comp_addr_lo =
3875                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3876                         dmae->comp_addr_hi =
3877                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3878                         dmae->comp_val = DMAE_COMP_VAL;
3879
3880                         *stats_comp = 0;
3881                 }
3882         }
3883
3884         if (bp->func_stx) {
3885
3886                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3887                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3888                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3889                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3890                 dmae->dst_addr_lo = bp->func_stx >> 2;
3891                 dmae->dst_addr_hi = 0;
3892                 dmae->len = sizeof(struct host_func_stats) >> 2;
3893                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3895                 dmae->comp_val = DMAE_COMP_VAL;
3896
3897                 *stats_comp = 0;
3898         }
3899 }
3900
3901 static void bnx2x_stats_stop(struct bnx2x *bp)
3902 {
3903         int update = 0;
3904
3905         bnx2x_stats_comp(bp);
3906
3907         if (bp->port.pmf)
3908                 update = (bnx2x_hw_stats_update(bp) == 0);
3909
3910         update |= (bnx2x_storm_stats_update(bp) == 0);
3911
3912         if (update) {
3913                 bnx2x_net_stats_update(bp);
3914
3915                 if (bp->port.pmf)
3916                         bnx2x_port_stats_stop(bp);
3917
3918                 bnx2x_hw_stats_post(bp);
3919                 bnx2x_stats_comp(bp);
3920         }
3921 }
3922
3923 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3924 {
3925 }
3926
3927 static const struct {
3928         void (*action)(struct bnx2x *bp);
3929         enum bnx2x_stats_state next_state;
3930 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3931 /* state        event   */
3932 {
3933 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3934 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3935 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3936 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3937 },
3938 {
3939 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3940 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3941 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3942 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3943 }
3944 };
3945
3946 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3947 {
3948         enum bnx2x_stats_state state = bp->stats_state;
3949
3950         bnx2x_stats_stm[state][event].action(bp);
3951         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3952
3953         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3954                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3955                    state, event, bp->stats_state);
3956 }
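/*
 * Usage sketch for the table-driven state machine above: a LINK_UP
 * event while DISABLED runs bnx2x_stats_start() and moves to ENABLED;
 * a later STOP runs bnx2x_stats_stop() and returns to DISABLED.
 */
#if 0
	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);	/* DISABLED -> ENABLED */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);	/* ENABLED -> DISABLED */
#endif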
3957
3958 static void bnx2x_timer(unsigned long data)
3959 {
3960         struct bnx2x *bp = (struct bnx2x *) data;
3961
3962         if (!netif_running(bp->dev))
3963                 return;
3964
3965         if (atomic_read(&bp->intr_sem) != 0)
3966                 goto timer_restart;
3967
3968         if (poll) {
3969                 struct bnx2x_fastpath *fp = &bp->fp[0];
3970                 int rc;
3971
3972                 bnx2x_tx_int(fp, 1000);
3973                 rc = bnx2x_rx_int(fp, 1000);
3974         }
3975
3976         if (!BP_NOMCP(bp)) {
3977                 int func = BP_FUNC(bp);
3978                 u32 drv_pulse;
3979                 u32 mcp_pulse;
3980
3981                 ++bp->fw_drv_pulse_wr_seq;
3982                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3983                 /* TBD - add SYSTEM_TIME */
3984                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3985                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3986
3987                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3988                              MCP_PULSE_SEQ_MASK);
3989                 /* The delta between driver pulse and mcp response
3990                  * should be 1 (before mcp response) or 0 (after mcp response)
3991                  */
3992                 if ((drv_pulse != mcp_pulse) &&
3993                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3994                         /* someone lost a heartbeat... */
3995                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3996                                   drv_pulse, mcp_pulse);
3997                 }
3998         }
3999
4000         if ((bp->state == BNX2X_STATE_OPEN) ||
4001             (bp->state == BNX2X_STATE_DISABLED))
4002                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4003
4004 timer_restart:
4005         mod_timer(&bp->timer, jiffies + bp->current_interval);
4006 }
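
/*
 * The pulse exchange above is a wraparound-safe heartbeat: the driver
 * bumps a masked sequence number on every timer tick and the management
 * CPU echoes it back, so a healthy link shows a delta of 0 (MCP already
 * caught up) or 1 (MCP one tick behind).  A sketch of just that check;
 * the 0x7fff mask is an assumed placeholder standing in for the real
 * DRV_PULSE_SEQ_MASK / MCP_PULSE_SEQ_MASK values:
 */
#if 0
#define DEMO_PULSE_SEQ_MASK	0x7fff	/* assumption for the sketch */

static int demo_heartbeat_ok(u32 drv_pulse, u32 mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & DEMO_PULSE_SEQ_MASK));
}
#endif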
4007
4008 /* end of Statistics */
4009
4010 /* nic init */
4011
4012 /*
4013  * nic init service functions
4014  */
4015
4016 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4017 {
4018         int port = BP_PORT(bp);
4019
4020         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4021                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4022                         sizeof(struct ustorm_status_block)/4);
4023         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4024                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4025                         sizeof(struct cstorm_status_block)/4);
4026 }
4027
4028 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4029                           dma_addr_t mapping, int sb_id)
4030 {
4031         int port = BP_PORT(bp);
4032         int func = BP_FUNC(bp);
4033         int index;
4034         u64 section;
4035
4036         /* USTORM */
4037         section = ((u64)mapping) + offsetof(struct host_status_block,
4038                                             u_status_block);
4039         sb->u_status_block.status_block_id = sb_id;
4040
4041         REG_WR(bp, BAR_USTRORM_INTMEM +
4042                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4043         REG_WR(bp, BAR_USTRORM_INTMEM +
4044                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4045                U64_HI(section));
4046         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4047                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4048
4049         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4050                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4051                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4052
4053         /* CSTORM */
4054         section = ((u64)mapping) + offsetof(struct host_status_block,
4055                                             c_status_block);
4056         sb->c_status_block.status_block_id = sb_id;
4057
4058         REG_WR(bp, BAR_CSTRORM_INTMEM +
4059                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4060         REG_WR(bp, BAR_CSTRORM_INTMEM +
4061                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4062                U64_HI(section));
4063         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4064                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4065
4066         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4067                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4068                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4069
4070         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4071 }
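
/*
 * All of the status block plumbing above follows one idiom: a 64-bit DMA
 * address is handed to the chip as two 32-bit register writes, low half
 * at the offset and high half at offset + 4.  A sketch of that split;
 * demo_wr32() is a hypothetical stand-in for REG_WR():
 */
#if 0
static void demo_wr32(struct bnx2x *bp, u32 off, u32 val);

static void demo_write_dma_addr(struct bnx2x *bp, u32 off, dma_addr_t map)
{
	u64 addr = (u64)map;

	demo_wr32(bp, off, (u32)addr);			/* like U64_LO() */
	demo_wr32(bp, off + 4, (u32)(addr >> 32));	/* like U64_HI() */
}
#endif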
4072
4073 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4074 {
4075         int func = BP_FUNC(bp);
4076
4077         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4078                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4079                         sizeof(struct ustorm_def_status_block)/4);
4080         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4081                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4082                         sizeof(struct cstorm_def_status_block)/4);
4083         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4084                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4085                         sizeof(struct xstorm_def_status_block)/4);
4086         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4087                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4088                         sizeof(struct tstorm_def_status_block)/4);
4089 }
4090
4091 static void bnx2x_init_def_sb(struct bnx2x *bp,
4092                               struct host_def_status_block *def_sb,
4093                               dma_addr_t mapping, int sb_id)
4094 {
4095         int port = BP_PORT(bp);
4096         int func = BP_FUNC(bp);
4097         int index, val, reg_offset;
4098         u64 section;
4099
4100         /* ATTN */
4101         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4102                                             atten_status_block);
4103         def_sb->atten_status_block.status_block_id = sb_id;
4104
4105         bp->attn_state = 0;
4106
4107         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4108                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4109
4110         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4111                 bp->attn_group[index].sig[0] = REG_RD(bp,
4112                                                      reg_offset + 0x10*index);
4113                 bp->attn_group[index].sig[1] = REG_RD(bp,
4114                                                reg_offset + 0x4 + 0x10*index);
4115                 bp->attn_group[index].sig[2] = REG_RD(bp,
4116                                                reg_offset + 0x8 + 0x10*index);
4117                 bp->attn_group[index].sig[3] = REG_RD(bp,
4118                                                reg_offset + 0xc + 0x10*index);
4119         }
4120
4121         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4122                              HC_REG_ATTN_MSG0_ADDR_L);
4123
4124         REG_WR(bp, reg_offset, U64_LO(section));
4125         REG_WR(bp, reg_offset + 4, U64_HI(section));
4126
4127         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4128
4129         val = REG_RD(bp, reg_offset);
4130         val |= sb_id;
4131         REG_WR(bp, reg_offset, val);
4132
4133         /* USTORM */
4134         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135                                             u_def_status_block);
4136         def_sb->u_def_status_block.status_block_id = sb_id;
4137
4138         REG_WR(bp, BAR_USTRORM_INTMEM +
4139                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140         REG_WR(bp, BAR_USTRORM_INTMEM +
4141                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142                U64_HI(section));
4143         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4144                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4145
4146         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4147                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4148                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4149
4150         /* CSTORM */
4151         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152                                             c_def_status_block);
4153         def_sb->c_def_status_block.status_block_id = sb_id;
4154
4155         REG_WR(bp, BAR_CSTRORM_INTMEM +
4156                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157         REG_WR(bp, BAR_CSTRORM_INTMEM +
4158                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159                U64_HI(section));
4160         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4161                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4162
4163         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4164                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4165                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4166
4167         /* TSTORM */
4168         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4169                                             t_def_status_block);
4170         def_sb->t_def_status_block.status_block_id = sb_id;
4171
4172         REG_WR(bp, BAR_TSTRORM_INTMEM +
4173                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4174         REG_WR(bp, BAR_TSTRORM_INTMEM +
4175                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4176                U64_HI(section));
4177         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4178                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4179
4180         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4181                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4182                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4183
4184         /* XSTORM */
4185         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4186                                             x_def_status_block);
4187         def_sb->x_def_status_block.status_block_id = sb_id;
4188
4189         REG_WR(bp, BAR_XSTRORM_INTMEM +
4190                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4191         REG_WR(bp, BAR_XSTRORM_INTMEM +
4192                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4193                U64_HI(section));
4194         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4195                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4196
4197         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4198                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4199                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4200
4201         bp->stats_pending = 0;
4202         bp->set_mac_pending = 0;
4203
4204         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4205 }
4206
4207 static void bnx2x_update_coalesce(struct bnx2x *bp)
4208 {
4209         int port = BP_PORT(bp);
4210         int i;
4211
4212         for_each_queue(bp, i) {
4213                 int sb_id = bp->fp[i].sb_id;
4214
4215                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4216                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4217                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4218                                                     U_SB_ETH_RX_CQ_INDEX),
4219                         bp->rx_ticks/12);
4220                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4221                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4222                                                      U_SB_ETH_RX_CQ_INDEX),
4223                          bp->rx_ticks ? 0 : 1);
4224                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4225                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226                                                      U_SB_ETH_RX_BD_INDEX),
4227                          bp->rx_ticks ? 0 : 1);
4228
4229                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4230                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4231                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4232                                                     C_SB_ETH_TX_CQ_INDEX),
4233                         bp->tx_ticks/12);
4234                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4235                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4236                                                      C_SB_ETH_TX_CQ_INDEX),
4237                          bp->tx_ticks ? 0 : 1);
4238         }
4239 }
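
/*
 * The /12 above suggests the HC timeout fields are programmed in units
 * of roughly 12 usec per tick (e.g. rx_ticks = 25 usec becomes 2 HC
 * ticks), while a timeout of zero is expressed by setting the companion
 * HC_DISABLE flag instead.  A sketch of the conversion, with the 12-usec
 * resolution taken as an assumption inferred from the code:
 */
#if 0
static void demo_coalesce_params(u16 usec, u8 *hc_ticks, u16 *hc_disable)
{
	*hc_ticks = usec / 12;		/* assumed 12-usec HC resolution */
	*hc_disable = usec ? 0 : 1;	/* zero means "no coalescing" */
}
#endif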
4240
4241 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4242                                        struct bnx2x_fastpath *fp, int last)
4243 {
4244         int i;
4245
4246         for (i = 0; i < last; i++) {
4247                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4248                 struct sk_buff *skb = rx_buf->skb;
4249
4250                 if (skb == NULL) {
4251                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4252                         continue;
4253                 }
4254
4255                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4256                         pci_unmap_single(bp->pdev,
4257                                          pci_unmap_addr(rx_buf, mapping),
4258                                          bp->rx_buf_size,
4259                                          PCI_DMA_FROMDEVICE);
4260
4261                 dev_kfree_skb(skb);
4262                 rx_buf->skb = NULL;
4263         }
4264 }
4265
4266 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4267 {
4268         int func = BP_FUNC(bp);
4269         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4270                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4271         u16 ring_prod, cqe_ring_prod;
4272         int i, j;
4273
4274         bp->rx_buf_size = bp->dev->mtu;
4275         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4276                 BCM_RX_ETH_PAYLOAD_ALIGN;
4277
4278         if (bp->flags & TPA_ENABLE_FLAG) {
4279                 DP(NETIF_MSG_IFUP,
4280                    "rx_buf_size %d  effective_mtu %d\n",
4281                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4282
4283                 for_each_queue(bp, j) {
4284                         struct bnx2x_fastpath *fp = &bp->fp[j];
4285
4286                         for (i = 0; i < max_agg_queues; i++) {
4287                                 fp->tpa_pool[i].skb =
4288                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4289                                 if (!fp->tpa_pool[i].skb) {
4290                                         BNX2X_ERR("Failed to allocate TPA "
4291                                                   "skb pool for queue[%d] - "
4292                                                   "disabling TPA on this "
4293                                                   "queue!\n", j);
4294                                         bnx2x_free_tpa_pool(bp, fp, i);
4295                                         fp->disable_tpa = 1;
4296                                         break;
4297                                 }
4298                                 /* use this queue's pool, not fp[0]'s */
4299                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4300                                                    mapping, 0);
4301                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4302                         }
4303                 }
4304         }
4305
4306         for_each_queue(bp, j) {
4307                 struct bnx2x_fastpath *fp = &bp->fp[j];
4308
4309                 fp->rx_bd_cons = 0;
4310                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4311                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4312
4313                 /* "next page" elements initialization */
4314                 /* SGE ring */
4315                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4316                         struct eth_rx_sge *sge;
4317
4318                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4319                         sge->addr_hi =
4320                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4321                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4322                         sge->addr_lo =
4323                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4324                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4325                 }
4326
4327                 bnx2x_init_sge_ring_bit_mask(fp);
4328
4329                 /* RX BD ring */
4330                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4331                         struct eth_rx_bd *rx_bd;
4332
4333                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4334                         rx_bd->addr_hi =
4335                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4336                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4337                         rx_bd->addr_lo =
4338                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4339                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4340                 }
4341
4342                 /* CQ ring */
4343                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4344                         struct eth_rx_cqe_next_page *nextpg;
4345
4346                         nextpg = (struct eth_rx_cqe_next_page *)
4347                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4348                         nextpg->addr_hi =
4349                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4350                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351                         nextpg->addr_lo =
4352                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4353                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4354                 }
4355
4356                 /* Allocate SGEs and initialize the ring elements */
4357                 for (i = 0, ring_prod = 0;
4358                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4359
4360                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4361                                 BNX2X_ERR("was only able to allocate "
4362                                           "%d rx sges\n", i);
4363                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4364                                 /* Cleanup already allocated elements */
4365                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4366                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4367                                 fp->disable_tpa = 1;
4368                                 ring_prod = 0;
4369                                 break;
4370                         }
4371                         ring_prod = NEXT_SGE_IDX(ring_prod);
4372                 }
4373                 fp->rx_sge_prod = ring_prod;
4374
4375                 /* Allocate BDs and initialize BD ring */
4376                 fp->rx_comp_cons = 0;
4377                 cqe_ring_prod = ring_prod = 0;
4378                 for (i = 0; i < bp->rx_ring_size; i++) {
4379                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4380                                 BNX2X_ERR("was only able to allocate "
4381                                           "%d rx skbs\n", i);
4382                                 bp->eth_stats.rx_skb_alloc_failed++;
4383                                 break;
4384                         }
4385                         ring_prod = NEXT_RX_IDX(ring_prod);
4386                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4387                         WARN_ON(ring_prod <= i);
4388                 }
4389
4390                 fp->rx_bd_prod = ring_prod;
4391                 /* must not have more available CQEs than BDs */
4392                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4393                                        cqe_ring_prod);
4394                 fp->rx_pkt = fp->rx_calls = 0;
4395
4396                 /* Warning!
4397                  * This will generate an interrupt (to the TSTORM),
4398                  * so it must only be done after the chip is initialized.
4399                  */
4400                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4401                                      fp->rx_sge_prod);
4402                 if (j != 0)
4403                         continue;
4404
4405                 REG_WR(bp, BAR_USTRORM_INTMEM +
4406                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4407                        U64_LO(fp->rx_comp_mapping));
4408                 REG_WR(bp, BAR_USTRORM_INTMEM +
4409                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4410                        U64_HI(fp->rx_comp_mapping));
4411         }
4412 }
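
/*
 * Each of the rings initialized above is a chain of pages: the final
 * descriptor slot(s) of every page hold a "next page" pointer, and the
 * `% NUM_*_RINGS` arithmetic wraps the last page back to the first so
 * the producer/consumer indices can run forever.  A generic sketch of
 * that chaining with hypothetical names:
 */
#if 0
struct demo_next_page { __le32 addr_hi; __le32 addr_lo; };

static void demo_chain_ring_pages(struct demo_next_page *ring,
				  dma_addr_t base, int num_pages,
				  int desc_per_page, int page_size)
{
	int i;

	for (i = 1; i <= num_pages; i++) {
		/* last slot of page i-1 links to page (i % num_pages) */
		struct demo_next_page *link = &ring[desc_per_page * i - 1];
		u64 next = (u64)base + page_size * (i % num_pages);

		link->addr_hi = cpu_to_le32(U64_HI(next));
		link->addr_lo = cpu_to_le32(U64_LO(next));
	}
}
#endif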
4413
4414 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4415 {
4416         int i, j;
4417
4418         for_each_queue(bp, j) {
4419                 struct bnx2x_fastpath *fp = &bp->fp[j];
4420
4421                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4422                         struct eth_tx_bd *tx_bd =
4423                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4424
4425                         tx_bd->addr_hi =
4426                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4427                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4428                         tx_bd->addr_lo =
4429                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4430                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4431                 }
4432
4433                 fp->tx_pkt_prod = 0;
4434                 fp->tx_pkt_cons = 0;
4435                 fp->tx_bd_prod = 0;
4436                 fp->tx_bd_cons = 0;
4437                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4438                 fp->tx_pkt = 0;
4439         }
4440 }
4441
4442 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4443 {
4444         int func = BP_FUNC(bp);
4445
4446         spin_lock_init(&bp->spq_lock);
4447
4448         bp->spq_left = MAX_SPQ_PENDING;
4449         bp->spq_prod_idx = 0;
4450         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4451         bp->spq_prod_bd = bp->spq;
4452         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4453
4454         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4455                U64_LO(bp->spq_mapping));
4456         REG_WR(bp,
4457                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4458                U64_HI(bp->spq_mapping));
4459
4460         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4461                bp->spq_prod_idx);
4462 }
4463
4464 static void bnx2x_init_context(struct bnx2x *bp)
4465 {
4466         int i;
4467
4468         for_each_queue(bp, i) {
4469                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4470                 struct bnx2x_fastpath *fp = &bp->fp[i];
4471                 u8 sb_id = FP_SB_ID(fp);
4472
4473                 context->xstorm_st_context.tx_bd_page_base_hi =
4474                                                 U64_HI(fp->tx_desc_mapping);
4475                 context->xstorm_st_context.tx_bd_page_base_lo =
4476                                                 U64_LO(fp->tx_desc_mapping);
4477                 context->xstorm_st_context.db_data_addr_hi =
4478                                                 U64_HI(fp->tx_prods_mapping);
4479                 context->xstorm_st_context.db_data_addr_lo =
4480                                                 U64_LO(fp->tx_prods_mapping);
4481                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4482                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4483
4484                 context->ustorm_st_context.common.sb_index_numbers =
4485                                                 BNX2X_RX_SB_INDEX_NUM;
4486                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4487                 context->ustorm_st_context.common.status_block_id = sb_id;
4488                 context->ustorm_st_context.common.flags =
4489                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4490                 context->ustorm_st_context.common.mc_alignment_size =
4491                         BCM_RX_ETH_PAYLOAD_ALIGN;
4492                 context->ustorm_st_context.common.bd_buff_size =
4493                                                 bp->rx_buf_size;
4494                 context->ustorm_st_context.common.bd_page_base_hi =
4495                                                 U64_HI(fp->rx_desc_mapping);
4496                 context->ustorm_st_context.common.bd_page_base_lo =
4497                                                 U64_LO(fp->rx_desc_mapping);
4498                 if (!fp->disable_tpa) {
4499                         context->ustorm_st_context.common.flags |=
4500                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4501                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4502                         context->ustorm_st_context.common.sge_buff_size =
4503                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4504                         context->ustorm_st_context.common.sge_page_base_hi =
4505                                                 U64_HI(fp->rx_sge_mapping);
4506                         context->ustorm_st_context.common.sge_page_base_lo =
4507                                                 U64_LO(fp->rx_sge_mapping);
4508                 }
4509
4510                 context->cstorm_st_context.sb_index_number =
4511                                                 C_SB_ETH_TX_CQ_INDEX;
4512                 context->cstorm_st_context.status_block_id = sb_id;
4513
4514                 context->xstorm_ag_context.cdu_reserved =
4515                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516                                                CDU_REGION_NUMBER_XCM_AG,
4517                                                ETH_CONNECTION_TYPE);
4518                 context->ustorm_ag_context.cdu_usage =
4519                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4520                                                CDU_REGION_NUMBER_UCM_AG,
4521                                                ETH_CONNECTION_TYPE);
4522         }
4523 }
4524
4525 static void bnx2x_init_ind_table(struct bnx2x *bp)
4526 {
4527         int port = BP_PORT(bp);
4528         int i;
4529
4530         if (!is_multi(bp))
4531                 return;
4532
4533         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4534         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4535                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4536                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4537                         i % bp->num_queues);
4538
4539         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4540 }
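
/*
 * The indirection table fill above is a plain round-robin: entry i maps
 * RSS hash result i to queue (i % num_queues), so with, say, 4 queues
 * the table reads 0,1,2,3,0,1,2,3,... and flows spread evenly.  A sketch:
 */
#if 0
static void demo_fill_rss_ind_table(u8 *table, int size, int num_queues)
{
	int i;

	for (i = 0; i < size; i++)
		table[i] = i % num_queues;
}
#endif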
4541
4542 static void bnx2x_set_client_config(struct bnx2x *bp)
4543 {
4544         struct tstorm_eth_client_config tstorm_client = {0};
4545         int port = BP_PORT(bp);
4546         int i;
4547
4548         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4549         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4550         tstorm_client.config_flags =
4551                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4552 #ifdef BCM_VLAN
4553         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4554                 tstorm_client.config_flags |=
4555                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4556                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4557         }
4558 #endif
4559
4560         if (bp->flags & TPA_ENABLE_FLAG) {
4561                 tstorm_client.max_sges_for_packet =
4562                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4563                 tstorm_client.max_sges_for_packet =
4564                         ((tstorm_client.max_sges_for_packet +
4565                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4566                         PAGES_PER_SGE_SHIFT;
4567
4568                 tstorm_client.config_flags |=
4569                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4570         }
4571
4572         for_each_queue(bp, i) {
4573                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4574                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4575                        ((u32 *)&tstorm_client)[0]);
4576                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4577                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4578                        ((u32 *)&tstorm_client)[1]);
4579         }
4580
4581         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4582            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4583 }
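
/*
 * The max_sges_for_packet arithmetic above first rounds the MTU up to
 * whole SGE pages, then rounds that page count up to a whole number of
 * SGEs.  A sketch with the rounding written out; the worked numbers in
 * the comment assume 4K SGE pages and PAGES_PER_SGE == 2 purely for
 * illustration (a 9000-byte MTU -> 3 pages -> rounded to 4 -> 2 SGEs):
 */
#if 0
static u16 demo_max_sges_for_packet(u32 mtu, u32 page_size,
				    u32 pages_per_sge)
{
	/* pages needed for a max-sized packet (round up) */
	u32 pages = (mtu + page_size - 1) / page_size;

	/* round up to a whole number of SGEs; pages_per_sge is assumed
	 * to be a power of two, as in the driver's shift-based math */
	pages = (pages + pages_per_sge - 1) & ~(pages_per_sge - 1);
	return pages / pages_per_sge;
}
#endif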
4584
4585 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4586 {
4587         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4588         int mode = bp->rx_mode;
4589         int mask = (1 << BP_L_ID(bp));
4590         int func = BP_FUNC(bp);
4591         int i;
4592
4593         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4594
4595         switch (mode) {
4596         case BNX2X_RX_MODE_NONE: /* no Rx */
4597                 tstorm_mac_filter.ucast_drop_all = mask;
4598                 tstorm_mac_filter.mcast_drop_all = mask;
4599                 tstorm_mac_filter.bcast_drop_all = mask;
4600                 break;
4601         case BNX2X_RX_MODE_NORMAL:
4602                 tstorm_mac_filter.bcast_accept_all = mask;
4603                 break;
4604         case BNX2X_RX_MODE_ALLMULTI:
4605                 tstorm_mac_filter.mcast_accept_all = mask;
4606                 tstorm_mac_filter.bcast_accept_all = mask;
4607                 break;
4608         case BNX2X_RX_MODE_PROMISC:
4609                 tstorm_mac_filter.ucast_accept_all = mask;
4610                 tstorm_mac_filter.mcast_accept_all = mask;
4611                 tstorm_mac_filter.bcast_accept_all = mask;
4612                 break;
4613         default:
4614                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4615                 break;
4616         }
4617
4618         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4619                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4620                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4621                        ((u32 *)&tstorm_mac_filter)[i]);
4622
4623 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4624                    ((u32 *)&tstorm_mac_filter)[i]); */
4625         }
4626
4627         if (mode != BNX2X_RX_MODE_NONE)
4628                 bnx2x_set_client_config(bp);
4629 }
4630
4631 static void bnx2x_init_internal_common(struct bnx2x *bp)
4632 {
4633         int i;
4634
4635         if (bp->flags & TPA_ENABLE_FLAG) {
4636                 struct tstorm_eth_tpa_exist tpa = {0};
4637
4638                 tpa.tpa_exist = 1;
4639
4640                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4641                        ((u32 *)&tpa)[0]);
4642                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4643                        ((u32 *)&tpa)[1]);
4644         }
4645
4646         /* Zero this manually as its initialization is
4647            currently missing in the initTool */
4648         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4649                 REG_WR(bp, BAR_USTRORM_INTMEM +
4650                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4651 }
4652
4653 static void bnx2x_init_internal_port(struct bnx2x *bp)
4654 {
4655         int port = BP_PORT(bp);
4656
4657         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4658         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4659         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4660         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4661 }
4662
4663 static void bnx2x_init_internal_func(struct bnx2x *bp)
4664 {
4665         struct tstorm_eth_function_common_config tstorm_config = {0};
4666         struct stats_indication_flags stats_flags = {0};
4667         int port = BP_PORT(bp);
4668         int func = BP_FUNC(bp);
4669         int i;
4670         u16 max_agg_size;
4671
4672         if (is_multi(bp)) {
4673                 tstorm_config.config_flags = MULTI_FLAGS;
4674                 tstorm_config.rss_result_mask = MULTI_MASK;
4675         }
4676
4677         tstorm_config.leading_client_id = BP_L_ID(bp);
4678
4679         REG_WR(bp, BAR_TSTRORM_INTMEM +
4680                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4681                (*(u32 *)&tstorm_config));
4682
4683         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4684         bnx2x_set_storm_rx_mode(bp);
4685
4686         /* reset xstorm per client statistics */
4687         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4688                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4689                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4690                        i*4, 0);
4691         }
4692         /* reset tstorm per client statistics */
4693         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4694                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4695                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4696                        i*4, 0);
4697         }
4698
4699         /* Init statistics related context */
4700         stats_flags.collect_eth = 1;
4701
4702         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4703                ((u32 *)&stats_flags)[0]);
4704         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4705                ((u32 *)&stats_flags)[1]);
4706
4707         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4708                ((u32 *)&stats_flags)[0]);
4709         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4710                ((u32 *)&stats_flags)[1]);
4711
4712         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4713                ((u32 *)&stats_flags)[0]);
4714         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4715                ((u32 *)&stats_flags)[1]);
4716
4717         REG_WR(bp, BAR_XSTRORM_INTMEM +
4718                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4719                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4720         REG_WR(bp, BAR_XSTRORM_INTMEM +
4721                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4722                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4723
4724         REG_WR(bp, BAR_TSTRORM_INTMEM +
4725                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4726                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4727         REG_WR(bp, BAR_TSTRORM_INTMEM +
4728                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4729                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4730
4731         if (CHIP_IS_E1H(bp)) {
4732                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4733                         IS_E1HMF(bp));
4734                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4735                         IS_E1HMF(bp));
4736                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4737                         IS_E1HMF(bp));
4738                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4739                         IS_E1HMF(bp));
4740
4741                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4742                          bp->e1hov);
4743         }
4744
4745         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4746         max_agg_size =
4747                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4748                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4749                     (u32)0xffff);
4750         for_each_queue(bp, i) {
4751                 struct bnx2x_fastpath *fp = &bp->fp[i];
4752
4753                 REG_WR(bp, BAR_USTRORM_INTMEM +
4754                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4755                        U64_LO(fp->rx_comp_mapping));
4756                 REG_WR(bp, BAR_USTRORM_INTMEM +
4757                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4758                        U64_HI(fp->rx_comp_mapping));
4759
4760                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4761                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4762                          max_agg_size);
4763         }
4764 }
4765
4766 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4767 {
4768         switch (load_code) {
4769         case FW_MSG_CODE_DRV_LOAD_COMMON:
4770                 bnx2x_init_internal_common(bp);
4771                 /* no break */
4772
4773         case FW_MSG_CODE_DRV_LOAD_PORT:
4774                 bnx2x_init_internal_port(bp);
4775                 /* no break */
4776
4777         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4778                 bnx2x_init_internal_func(bp);
4779                 break;
4780
4781         default:
4782                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4783                 break;
4784         }
4785 }
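
/*
 * Note the deliberate fall-through above: COMMON init also performs the
 * PORT and FUNCTION stages, and PORT init also performs FUNCTION, so the
 * three load codes form a strict superset hierarchy.  The pattern in
 * isolation:
 */
#if 0
static void demo_cascaded_init(int scope)	/* 2=common 1=port 0=func */
{
	switch (scope) {
	case 2:
		/* one-time, chip-wide setup */
		/* fall through */
	case 1:
		/* per-port setup */
		/* fall through */
	case 0:
		/* per-function setup */
		break;
	}
}
#endif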
4786
4787 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4788 {
4789         int i;
4790
4791         for_each_queue(bp, i) {
4792                 struct bnx2x_fastpath *fp = &bp->fp[i];
4793
4794                 fp->bp = bp;
4795                 fp->state = BNX2X_FP_STATE_CLOSED;
4796                 fp->index = i;
4797                 fp->cl_id = BP_L_ID(bp) + i;
4798                 fp->sb_id = fp->cl_id;
4799                 DP(NETIF_MSG_IFUP,
4800                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4801                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4802                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4803                               FP_SB_ID(fp));
4804                 bnx2x_update_fpsb_idx(fp);
4805         }
4806
4807         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4808                           DEF_SB_ID);
4809         bnx2x_update_dsb_idx(bp);
4810         bnx2x_update_coalesce(bp);
4811         bnx2x_init_rx_rings(bp);
4812         bnx2x_init_tx_ring(bp);
4813         bnx2x_init_sp_ring(bp);
4814         bnx2x_init_context(bp);
4815         bnx2x_init_internal(bp, load_code);
4816         bnx2x_init_ind_table(bp);
4817         bnx2x_int_enable(bp);
4818 }
4819
4820 /* end of nic init */
4821
4822 /*
4823  * gzip service functions
4824  */
4825
4826 static int bnx2x_gunzip_init(struct bnx2x *bp)
4827 {
4828         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4829                                               &bp->gunzip_mapping);
4830         if (bp->gunzip_buf == NULL)
4831                 goto gunzip_nomem1;
4832
4833         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4834         if (bp->strm == NULL)
4835                 goto gunzip_nomem2;
4836
4837         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4838                                       GFP_KERNEL);
4839         if (bp->strm->workspace == NULL)
4840                 goto gunzip_nomem3;
4841
4842         return 0;
4843
4844 gunzip_nomem3:
4845         kfree(bp->strm);
4846         bp->strm = NULL;
4847
4848 gunzip_nomem2:
4849         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4850                             bp->gunzip_mapping);
4851         bp->gunzip_buf = NULL;
4852
4853 gunzip_nomem1:
4854         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4855                " decompression\n", bp->dev->name);
4856         return -ENOMEM;
4857 }
4858
4859 static void bnx2x_gunzip_end(struct bnx2x *bp)
4860 {
4861         kfree(bp->strm->workspace);
4862
4863         kfree(bp->strm);
4864         bp->strm = NULL;
4865
4866         if (bp->gunzip_buf) {
4867                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4868                                     bp->gunzip_mapping);
4869                 bp->gunzip_buf = NULL;
4870         }
4871 }
4872
4873 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4874 {
4875         int n, rc;
4876
4877         /* check gzip header */
4878         if ((len < 10) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4879                 return -EINVAL;
4880
4881         n = 10;
4882
4883 #define FNAME                           0x8
4884
4885         if (zbuf[3] & FNAME)
4886                 while ((n < len) && (zbuf[n++] != 0));
4887
4888         bp->strm->next_in = zbuf + n;
4889         bp->strm->avail_in = len - n;
4890         bp->strm->next_out = bp->gunzip_buf;
4891         bp->strm->avail_out = FW_BUF_SIZE;
4892
4893         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4894         if (rc != Z_OK)
4895                 return rc;
4896
4897         rc = zlib_inflate(bp->strm, Z_FINISH);
4898         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4899                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4900                        bp->dev->name, bp->strm->msg);
4901
4902         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4903         if (bp->gunzip_outlen & 0x3)
4904                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4905                                     " gunzip_outlen (%d) not aligned\n",
4906                        bp->dev->name, bp->gunzip_outlen);
4907         bp->gunzip_outlen >>= 2;
4908
4909         zlib_inflateEnd(bp->strm);
4910
4911         if (rc == Z_STREAM_END)
4912                 return 0;
4913
4914         return rc;
4915 }
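
/*
 * bnx2x_gunzip() skips the fixed 10-byte gzip header (plus the optional
 * NUL-terminated FNAME field) by hand and then inflates the remaining
 * raw deflate body, which is what the negative window-bits argument to
 * inflateInit2() selects.  A standalone userspace sketch of the same
 * approach against stock zlib (build with -lz); error handling trimmed:
 */
#if 0
#include <string.h>
#include <zlib.h>

#define DEMO_GZ_FNAME	0x08	/* FLG.FNAME: a file name follows */

static int demo_gunzip(const unsigned char *zbuf, unsigned int len,
		       unsigned char *out, unsigned int out_len)
{
	z_stream strm;
	unsigned int n = 10;	/* fixed gzip header size */
	int rc;

	if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b ||
	    zbuf[2] != Z_DEFLATED)
		return -1;
	if (zbuf[3] & DEMO_GZ_FNAME)	/* skip the optional file name */
		while (n < len && zbuf[n++] != 0)
			;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = (unsigned char *)(zbuf + n);
	strm.avail_in = len - n;
	strm.next_out = out;
	strm.avail_out = out_len;

	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)	/* raw deflate */
		return -1;
	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);

	return (rc == Z_STREAM_END) ? (int)(out_len - strm.avail_out) : -1;
}
#endif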
4916
4917 /* nic load/unload */
4918
4919 /*
4920  * General service functions
4921  */
4922
4923 /* send a NIG loopback debug packet */
4924 static void bnx2x_lb_pckt(struct bnx2x *bp)
4925 {
4926         u32 wb_write[3];
4927
4928         /* Ethernet source and destination addresses */
4929         wb_write[0] = 0x55555555;
4930         wb_write[1] = 0x55555555;
4931         wb_write[2] = 0x20;             /* SOP */
4932         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4933
4934         /* NON-IP protocol */
4935         wb_write[0] = 0x09000000;
4936         wb_write[1] = 0x55555555;
4937         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4938         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4939 }
4940
4941 /* Some of the internal memories are not directly readable from the
4942  * driver, so to test them we send debug packets through the loopback
4943  * path and check the block counters.
4944  */
4945 static int bnx2x_int_mem_test(struct bnx2x *bp)
4946 {
4947         int factor;
4948         int count, i;
4949         u32 val = 0;
4950
4951         if (CHIP_REV_IS_FPGA(bp))
4952                 factor = 120;
4953         else if (CHIP_REV_IS_EMUL(bp))
4954                 factor = 200;
4955         else
4956                 factor = 1;
4957
4958         DP(NETIF_MSG_HW, "start part1\n");
4959
4960         /* Disable inputs of parser neighbor blocks */
4961         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4962         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4963         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4964         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4965
4966         /*  Write 0 to parser credits for CFC search request */
4967         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4968
4969         /* send Ethernet packet */
4970         bnx2x_lb_pckt(bp);
4971
4972         /* TODO: should the NIG statistics be reset here? */
4973         /* Wait until NIG register shows 1 packet of size 0x10 */
4974         count = 1000 * factor;
4975         while (count) {
4976
4977                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4978                 val = *bnx2x_sp(bp, wb_data[0]);
4979                 if (val == 0x10)
4980                         break;
4981
4982                 msleep(10);
4983                 count--;
4984         }
4985         if (val != 0x10) {
4986                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4987                 return -1;
4988         }
4989
4990         /* Wait until PRS register shows 1 packet */
4991         count = 1000 * factor;
4992         while (count) {
4993                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4994                 if (val == 1)
4995                         break;
4996
4997                 msleep(10);
4998                 count--;
4999         }
5000         if (val != 0x1) {
5001                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5002                 return -2;
5003         }
5004
5005         /* Reset and init BRB, PRS */
5006         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5007         msleep(50);
5008         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5009         msleep(50);
5010         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5011         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5012
5013         DP(NETIF_MSG_HW, "part2\n");
5014
5015         /* Disable inputs of parser neighbor blocks */
5016         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5017         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5018         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5019         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5020
5021         /* Write 0 to parser credits for CFC search request */
5022         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5023
5024         /* send 10 Ethernet packets */
5025         for (i = 0; i < 10; i++)
5026                 bnx2x_lb_pckt(bp);
5027
5028         /* Wait until the NIG register shows 10 + 1 packets,
5029            i.e. 11 * 0x10 = 0xb0 octets in total */
5030         count = 1000 * factor;
5031         while (count) {
5032
5033                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5034                 val = *bnx2x_sp(bp, wb_data[0]);
5035                 if (val == 0xb0)
5036                         break;
5037
5038                 msleep(10);
5039                 count--;
5040         }
5041         if (val != 0xb0) {
5042                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5043                 return -3;
5044         }
5045
5046         /* Wait until PRS register shows 2 packets */
5047         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5048         if (val != 2)
5049                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5050
5051         /* Write 1 to parser credits for CFC search request */
5052         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5053
5054         /* Wait until PRS register shows 3 packets */
5055         msleep(10 * factor);
5056         /* the PRS packet counter should now show the 3rd packet */
5057         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5058         if (val != 3)
5059                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5060
5061         /* clear NIG EOP FIFO */
5062         for (i = 0; i < 11; i++)
5063                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5064         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5065         if (val != 1) {
5066                 BNX2X_ERR("clear of NIG failed\n");
5067                 return -4;
5068         }
5069
5070         /* Reset and init BRB, PRS, NIG */
5071         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5072         msleep(50);
5073         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5074         msleep(50);
5075         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5076         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5077 #ifndef BCM_ISCSI
5078         /* set NIC mode */
5079         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5080 #endif
5081
5082         /* Enable inputs of parser neighbor blocks */
5083         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5084         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5085         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5086         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5087
5088         DP(NETIF_MSG_HW, "done\n");
5089
5090         return 0; /* OK */
5091 }
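
/*
 * The memory test above leans on one recurring idiom: poll a counter
 * until it reaches an expected value, with the iteration budget scaled
 * by `factor` because FPGA and emulation platforms run far slower than
 * silicon.  The loop factored into a helper; demo_read32() is a
 * hypothetical register-read stand-in:
 */
#if 0
static u32 demo_read32(struct bnx2x *bp, u32 reg);

static int demo_poll_reg(struct bnx2x *bp, u32 reg, u32 expected,
			 int factor)
{
	int count = 1000 * factor;

	while (count--) {
		if (demo_read32(bp, reg) == expected)
			return 0;
		msleep(10);
	}
	return -ETIMEDOUT;
}
#endif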
5092
5093 static void enable_blocks_attention(struct bnx2x *bp)
5094 {
5095         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5096         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5097         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5098         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5099         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5100         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5101         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5102         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5103         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5104 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5105 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5106         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5107         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5108         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5109 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5110 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5111         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5112         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5113         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5114         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5115 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5116 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5117         if (CHIP_REV_IS_FPGA(bp))
5118                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5119         else
5120                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5121         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5122         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5123         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5124 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5125 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5126         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5127         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5128 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5129         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3-4 masked */
5130 }
5131
5132
5133 static int bnx2x_init_common(struct bnx2x *bp)
5134 {
5135         u32 val, i;
5136
5137         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5138
5139         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5140         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5141
5142         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5143         if (CHIP_IS_E1H(bp))
5144                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5145
5146         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5147         msleep(30);
5148         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5149
5150         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5151         if (CHIP_IS_E1(bp)) {
5152                 /* enable the HW interrupt from PXP on USDM overflow
5153                    (bit 16 of INT_MASK_0) */
5154                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5155         }
5156
5157         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5158         bnx2x_init_pxp(bp);
5159
5160 #ifdef __BIG_ENDIAN
5161         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5162         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5163         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5164         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5165         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5166
5167 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5168         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5169         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5170         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5171         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5172 #endif
5173
5174         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5175 #ifdef BCM_ISCSI
5176         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5177         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5178         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5179 #endif
5180
5181         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5182                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5183
5184         /* let the HW do its magic ... */
5185         msleep(100);
5186         /* finish PXP init */
5187         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5188         if (val != 1) {
5189                 BNX2X_ERR("PXP2 CFG failed\n");
5190                 return -EBUSY;
5191         }
5192         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5193         if (val != 1) {
5194                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5195                 return -EBUSY;
5196         }
5197
5198         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5199         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5200
5201         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5202
5203         /* clean the DMAE memory */
5204         bp->dmae_ready = 1;
5205         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5206
5207         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5208         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5209         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5210         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5211
5212         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5213         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5214         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5215         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5216
5217         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5218         /* soft reset pulse */
5219         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5220         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5221
5222 #ifdef BCM_ISCSI
5223         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5224 #endif
5225
5226         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5227         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5228         if (!CHIP_REV_IS_SLOW(bp)) {
5229                 /* enable hw interrupt from doorbell Q */
5230                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5231         }
5232
5233         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5234         if (CHIP_REV_IS_SLOW(bp)) {
5235                 /* fix for emulation and FPGA: effectively no pause */
5236                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5237                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5238                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5239                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5240         }
5241
5242         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5243         /* set NIC mode */
5244         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5245         if (CHIP_IS_E1H(bp))
5246                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5247
5248         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5249         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5250         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5251         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5252
5253         if (CHIP_IS_E1H(bp)) {
5254                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5255                                 STORM_INTMEM_SIZE_E1H/2);
5256                 bnx2x_init_fill(bp,
5257                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5258                                 0, STORM_INTMEM_SIZE_E1H/2);
5259                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5260                                 STORM_INTMEM_SIZE_E1H/2);
5261                 bnx2x_init_fill(bp,
5262                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5263                                 0, STORM_INTMEM_SIZE_E1H/2);
5264                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5265                                 STORM_INTMEM_SIZE_E1H/2);
5266                 bnx2x_init_fill(bp,
5267                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5268                                 0, STORM_INTMEM_SIZE_E1H/2);
5269                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5270                                 STORM_INTMEM_SIZE_E1H/2);
5271                 bnx2x_init_fill(bp,
5272                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5273                                 0, STORM_INTMEM_SIZE_E1H/2);
5274         } else { /* E1 */
5275                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5276                                 STORM_INTMEM_SIZE_E1);
5277                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5278                                 STORM_INTMEM_SIZE_E1);
5279                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5280                                 STORM_INTMEM_SIZE_E1);
5281                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5282                                 STORM_INTMEM_SIZE_E1);
5283         }
5284
5285         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5286         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5287         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5288         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5289
5290         /* sync semi rtc */
5291         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5292                0x80000000);
5293         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5294                0x80000000);
5295
5296         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5297         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5298         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5299
5300         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5301         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5302                 REG_WR(bp, i, 0xc0cac01a);
5303                 /* TODO: replace with something meaningful */
5304         }
5305         if (CHIP_IS_E1H(bp))
5306                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5307         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5308
5309         if (sizeof(union cdu_context) != 1024)
5310                 /* we currently assume that a context is 1024 bytes */
5311                 printk(KERN_ALERT PFX "please adjust the size of"
5312                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5313
5314         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5315         val = (4 << 24) + (0 << 12) + 1024;
5316         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5317         if (CHIP_IS_E1(bp)) {
5318                 /* !!! fix pxp client credit until excel update */
5319                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5320                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5321         }
5322
5323         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5324         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5325
5326         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5327         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5328
5329         /* PXPCS COMMON comes here */
5330         /* Reset PCIE errors for debug */
5331         REG_WR(bp, 0x2814, 0xffffffff);
5332         REG_WR(bp, 0x3820, 0xffffffff);
5333
5334         /* EMAC0 COMMON comes here */
5335         /* EMAC1 COMMON comes here */
5336         /* DBU COMMON comes here */
5337         /* DBG COMMON comes here */
5338
5339         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5340         if (CHIP_IS_E1H(bp)) {
5341                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5342                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5343         }
5344
5345         if (CHIP_REV_IS_SLOW(bp))
5346                 msleep(200);
5347
5348         /* finish CFC init */
5349         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5350         if (val != 1) {
5351                 BNX2X_ERR("CFC LL_INIT failed\n");
5352                 return -EBUSY;
5353         }
5354         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5355         if (val != 1) {
5356                 BNX2X_ERR("CFC AC_INIT failed\n");
5357                 return -EBUSY;
5358         }
5359         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5360         if (val != 1) {
5361                 BNX2X_ERR("CFC CAM_INIT failed\n");
5362                 return -EBUSY;
5363         }
5364         REG_WR(bp, CFC_REG_DEBUG0, 0);
5365
5366         /* read NIG statistic
5367            to see if this is our first up since powerup */
5368         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5369         val = *bnx2x_sp(bp, wb_data[0]);
5370
5371         /* do internal memory self test */
5372         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5373                 BNX2X_ERR("internal mem self test failed\n");
5374                 return -EBUSY;
5375         }
5376
5377         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5378         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5379         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5380                 /* Fan failure is indicated by SPIO 5 */
5381                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5382                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5383
5384                 /* set to active low mode */
5385                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5386                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5387                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5388                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5389
5390                 /* enable interrupt to signal the IGU */
5391                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5392                 val |= (1 << MISC_REGISTERS_SPIO_5);
5393                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5394                 break;
5395
5396         default:
5397                 break;
5398         }
5399
5400         /* clear PXP2 attentions */
5401         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5402
5403         enable_blocks_attention(bp);
5404
5405         if (!BP_NOMCP(bp)) {
5406                 bnx2x_acquire_phy_lock(bp);
5407                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5408                 bnx2x_release_phy_lock(bp);
5409         } else
5410                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5411
5412         return 0;
5413 }
5414
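/* Per-port HW init, run once per port after the common stage.
 * Blocks marked "comes here" are not yet initialized by this driver
 * version.  Note: the BCM_ISCSI sections below use locals (i,
 * wb_write, func) that this function never declares, so they do not
 * compile as-is when BCM_ISCSI is defined.
 */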
5415 static int bnx2x_init_port(struct bnx2x *bp)
5416 {
5417         int port = BP_PORT(bp);
5418         u32 val;
5419
5420         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5421
5422         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5423
5424         /* Port PXP comes here */
5425         /* Port PXP2 comes here */
5426 #ifdef BCM_ISCSI
5427         /* Port0  1
5428          * Port1  385 */
5429         i++;
5430         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5431         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5432         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5433         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5434
5435         /* Port0  2
5436          * Port1  386 */
5437         i++;
5438         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5439         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5440         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5441         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5442
5443         /* Port0  3
5444          * Port1  387 */
5445         i++;
5446         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5447         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5448         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5449         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5450 #endif
5451         /* Port CMs come here */
5452
5453         /* Port QM comes here */
5454 #ifdef BCM_ISCSI
5455         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5456         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5457
5458         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5459                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5460 #endif
5461         /* Port DQ comes here */
5462         /* Port BRB1 comes here */
5463         /* Port PRS comes here */
5464         /* Port TSDM comes here */
5465         /* Port CSDM comes here */
5466         /* Port USDM comes here */
5467         /* Port XSDM comes here */
5468         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5469                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5470         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5471                              port ? USEM_PORT1_END : USEM_PORT0_END);
5472         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5473                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5474         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5475                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5476         /* Port UPB comes here */
5477         /* Port XPB comes here */
5478
5479         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5480                              port ? PBF_PORT1_END : PBF_PORT0_END);
5481
5482         /* configure PBF to work without PAUSE, MTU 9000 */
5483         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5484
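        /* The threshold and credit below are apparently in 16-byte
         * units: 9040 covers a 9000-byte MTU frame plus overhead, and
         * the extra "+ 553 - 22" credits look like a fixed HW
         * allowance (an assumption; the magic numbers are from the
         * original code).
         */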
5485         /* update threshold */
5486         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5487         /* update init credit */
5488         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5489
5490         /* probe changes */
5491         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5492         msleep(5);
5493         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5494
5495 #ifdef BCM_ISCSI
5496         /* tell the searcher where the T2 table is */
5497         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5498
5499         wb_write[0] = U64_LO(bp->t2_mapping);
5500         wb_write[1] = U64_HI(bp->t2_mapping);
5501         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5502         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5503         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5504         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5505
5506         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5507         /* Port SRCH comes here */
5508 #endif
5509         /* Port CDU comes here */
5510         /* Port CFC comes here */
5511
5512         if (CHIP_IS_E1(bp)) {
5513                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5514                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5515         }
5516         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5517                              port ? HC_PORT1_END : HC_PORT0_END);
5518
5519         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5520                                     MISC_AEU_PORT0_START,
5521                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5522         /* init aeu_mask_attn_func_0/1:
5523          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5524          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5525          *             bits 4-7 are used for "per vn group attention" */
5526         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5527                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5528
5529         /* Port PXPCS comes here */
5530         /* Port EMAC0 comes here */
5531         /* Port EMAC1 comes here */
5532         /* Port DBU comes here */
5533         /* Port DBG comes here */
5534         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5535                              port ? NIG_PORT1_END : NIG_PORT0_END);
5536
5537         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5538
5539         if (CHIP_IS_E1H(bp)) {
5540                 u32 wsum;
5541                 struct cmng_struct_per_port m_cmng_port;
5542                 int vn;
5543
5544                 /* 0x2 disable e1hov, 0x1 enable */
5545                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5546                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5547
5548                 /* Init RATE SHAPING and FAIRNESS contexts.
5549                    Initialize as if there is 10G link. */
5550                 wsum = bnx2x_calc_vn_wsum(bp);
5551                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5552                 if (IS_E1HMF(bp))
5553                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5554                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5555                                         wsum, 10000, &m_cmng_port);
5556         }
5557
5558         /* Port MCP comes here */
5559         /* Port DMAE comes here */
5560
5561         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5562         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5563         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5564                 /* add SPIO 5 to group 0 */
5565                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5566                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5567                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5568                 break;
5569
5570         default:
5571                 break;
5572         }
5573
5574         bnx2x__link_reset(bp);
5575
5576         return 0;
5577 }
5578
5579 #define ILT_PER_FUNC            (768/2)
5580 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5581 /* the phys address is shifted right 12 bits and has a valid bit (1)
5582    added as the 53rd bit;
5583    then, since this is a wide register(TM),
5584    we split it into two 32-bit writes
5585  */
5586 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5587 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5588 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5589 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
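/* Worked example (illustration only, not from the original source):
 * for a DMA address of 0x123456000, ONCHIP_ADDR1() yields 0x00123456
 * (addr >> 12) and ONCHIP_ADDR2() yields 0x00100000 (just the valid
 * bit, since addr >> 44 is 0 for addresses below 2^44).
 * PXP_ONE_ILT(5) encodes first == last == 5 as 0x1405.
 */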
5590
5591 #define CNIC_ILT_LINES          0
5592
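/* Write a single ILT entry: the 64-bit DMA address is split into the
 * two 32-bit halves expected by the on-chip address table.
 */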
5593 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5594 {
5595         int reg;
5596
5597         if (CHIP_IS_E1H(bp))
5598                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5599         else /* E1 */
5600                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5601
5602         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5603 }
5604
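/* Per-function HW init: maps this function's ILT range and programs
 * the per-function CM, NIG and HC state (parts are E1H-only).
 */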
5605 static int bnx2x_init_func(struct bnx2x *bp)
5606 {
5607         int port = BP_PORT(bp);
5608         int func = BP_FUNC(bp);
5609         int i;
5610
5611         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5612
5613         i = FUNC_ILT_BASE(func);
5614
5615         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5616         if (CHIP_IS_E1H(bp)) {
5617                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5618                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5619         } else /* E1 */
5620                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5621                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5622
5623
5624         if (CHIP_IS_E1H(bp)) {
5625                 for (i = 0; i < 9; i++)
5626                         bnx2x_init_block(bp,
5627                                          cm_start[func][i], cm_end[func][i]);
5628
5629                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5630                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5631         }
5632
5633         /* HC init per function */
5634         if (CHIP_IS_E1H(bp)) {
5635                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5636
5637                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5638                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5639         }
5640         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5641
5642         if (CHIP_IS_E1H(bp))
5643                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5644
5645         /* Reset PCIE errors for debug */
5646         REG_WR(bp, 0x2114, 0xffffffff);
5647         REG_WR(bp, 0x2120, 0xffffffff);
5648
5649         return 0;
5650 }
5651
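/* Top-level HW init.  The switch below deliberately falls through
 * (see the "no break" comments) so that a COMMON load also performs
 * the PORT and FUNCTION stages.
 */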
5652 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5653 {
5654         int i, rc = 0;
5655
5656         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5657            BP_FUNC(bp), load_code);
5658
5659         bp->dmae_ready = 0;
5660         mutex_init(&bp->dmae_mutex);
5661         bnx2x_gunzip_init(bp);
5662
5663         switch (load_code) {
5664         case FW_MSG_CODE_DRV_LOAD_COMMON:
5665                 rc = bnx2x_init_common(bp);
5666                 if (rc)
5667                         goto init_hw_err;
5668                 /* no break */
5669
5670         case FW_MSG_CODE_DRV_LOAD_PORT:
5671                 bp->dmae_ready = 1;
5672                 rc = bnx2x_init_port(bp);
5673                 if (rc)
5674                         goto init_hw_err;
5675                 /* no break */
5676
5677         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5678                 bp->dmae_ready = 1;
5679                 rc = bnx2x_init_func(bp);
5680                 if (rc)
5681                         goto init_hw_err;
5682                 break;
5683
5684         default:
5685                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5686                 break;
5687         }
5688
5689         if (!BP_NOMCP(bp)) {
5690                 int func = BP_FUNC(bp);
5691
5692                 bp->fw_drv_pulse_wr_seq =
5693                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5694                                  DRV_PULSE_SEQ_MASK);
5695                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5696                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5697                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5698         } else
5699                 bp->func_stx = 0;
5700
5701         /* this needs to be done before gunzip end */
5702         bnx2x_zero_def_sb(bp);
5703         for_each_queue(bp, i)
5704                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5705
5706 init_hw_err:
5707         bnx2x_gunzip_end(bp);
5708
5709         return rc;
5710 }
5711
5712 /* send the MCP a request, block until there is a reply */
5713 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5714 {
5715         int func = BP_FUNC(bp);
5716         u32 seq = ++bp->fw_seq;
5717         u32 rc = 0;
5718         u32 cnt = 1;
5719         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5720
5721         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5722         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5723
5724         do {
5725                 /* let the FW do its magic ... */
5726                 msleep(delay);
5727
5728                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5729
5730                 /* Give the FW up to 2 seconds (200 * 10ms; 20s with the slow-chip delay) */
5731         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5732
5733         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5734            cnt*delay, rc, seq);
5735
5736         /* is this a reply to our command? */
5737         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5738                 rc &= FW_MSG_CODE_MASK;
5739
5740         } else {
5741                 /* FW BUG! */
5742                 BNX2X_ERR("FW failed to respond!\n");
5743                 bnx2x_fw_dump(bp);
5744                 rc = 0;
5745         }
5746
5747         return rc;
5748 }
5749
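/* Free all DMA-coherent and vmalloc'ed memory taken by
 * bnx2x_alloc_mem(); both macros tolerate NULL, so this is safe to
 * call on a partially allocated state.
 */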
5750 static void bnx2x_free_mem(struct bnx2x *bp)
5751 {
5752
5753 #define BNX2X_PCI_FREE(x, y, size) \
5754         do { \
5755                 if (x) { \
5756                         pci_free_consistent(bp->pdev, size, x, y); \
5757                         x = NULL; \
5758                         y = 0; \
5759                 } \
5760         } while (0)
5761
5762 #define BNX2X_FREE(x) \
5763         do { \
5764                 if (x) { \
5765                         vfree(x); \
5766                         x = NULL; \
5767                 } \
5768         } while (0)
5769
5770         int i;
5771
5772         /* fastpath */
5773         for_each_queue(bp, i) {
5774
5775                 /* Status blocks */
5776                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5777                                bnx2x_fp(bp, i, status_blk_mapping),
5778                                sizeof(struct host_status_block) +
5779                                sizeof(struct eth_tx_db_data));
5780
5781                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5782                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5783                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5784                                bnx2x_fp(bp, i, tx_desc_mapping),
5785                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5786
5787                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5788                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5789                                bnx2x_fp(bp, i, rx_desc_mapping),
5790                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5791
5792                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5793                                bnx2x_fp(bp, i, rx_comp_mapping),
5794                                sizeof(struct eth_fast_path_rx_cqe) *
5795                                NUM_RCQ_BD);
5796
5797                 /* SGE ring */
5798                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5799                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5800                                bnx2x_fp(bp, i, rx_sge_mapping),
5801                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5802         }
5803         /* end of fastpath */
5804
5805         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5806                        sizeof(struct host_def_status_block));
5807
5808         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5809                        sizeof(struct bnx2x_slowpath));
5810
5811 #ifdef BCM_ISCSI
5812         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5813         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5814         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5815         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5816 #endif
5817         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5818
5819 #undef BNX2X_PCI_FREE
5820 #undef BNX2X_FREE
5821 }
5822
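/* Allocate status blocks, rings and slowpath memory; on any failure
 * everything allocated so far is released and -ENOMEM is returned.
 */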
5823 static int bnx2x_alloc_mem(struct bnx2x *bp)
5824 {
5825
5826 #define BNX2X_PCI_ALLOC(x, y, size) \
5827         do { \
5828                 x = pci_alloc_consistent(bp->pdev, size, y); \
5829                 if (x == NULL) \
5830                         goto alloc_mem_err; \
5831                 memset(x, 0, size); \
5832         } while (0)
5833
5834 #define BNX2X_ALLOC(x, size) \
5835         do { \
5836                 x = vmalloc(size); \
5837                 if (x == NULL) \
5838                         goto alloc_mem_err; \
5839                 memset(x, 0, size); \
5840         } while (0)
5841
5842         int i;
5843
5844         /* fastpath */
5845         for_each_queue(bp, i) {
5846                 bnx2x_fp(bp, i, bp) = bp;
5847
5848                 /* Status blocks */
5849                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5850                                 &bnx2x_fp(bp, i, status_blk_mapping),
5851                                 sizeof(struct host_status_block) +
5852                                 sizeof(struct eth_tx_db_data));
5853
5854                 bnx2x_fp(bp, i, hw_tx_prods) =
5855                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5856
5857                 bnx2x_fp(bp, i, tx_prods_mapping) =
5858                                 bnx2x_fp(bp, i, status_blk_mapping) +
5859                                 sizeof(struct host_status_block);
5860
5861                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5862                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5863                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5864                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5865                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5866                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5867
5868                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5869                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5870                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5871                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5872                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5873
5874                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5875                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5876                                 sizeof(struct eth_fast_path_rx_cqe) *
5877                                 NUM_RCQ_BD);
5878
5879                 /* SGE ring */
5880                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5881                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5882                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5883                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5884                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5885         }
5886         /* end of fastpath */
5887
5888         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5889                         sizeof(struct host_def_status_block));
5890
5891         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5892                         sizeof(struct bnx2x_slowpath));
5893
5894 #ifdef BCM_ISCSI
5895         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5896
5897         /* Initialize T1 */
5898         for (i = 0; i < 64*1024; i += 64) {
5899                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5900                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5901         }
5902
5903         /* allocate searcher T2 table;
5904            we allocate 1/4 of the allocation size for T2
5905            (which is not entered into the ILT) */
5906         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5907
5908         /* Initialize T2 */
5909         for (i = 0; i < 16*1024; i += 64)
5910                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5911
5912         /* now fixup the last line in the block to point to the next block */
5913         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5914
5915         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5916         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5917
5918         /* QM queues (128*MAX_CONN) */
5919         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5920 #endif
5921
5922         /* Slow path ring */
5923         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5924
5925         return 0;
5926
5927 alloc_mem_err:
5928         bnx2x_free_mem(bp);
5929         return -ENOMEM;
5930
5931 #undef BNX2X_PCI_ALLOC
5932 #undef BNX2X_ALLOC
5933 }
5934
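/* Release tx packets still pending between the consumer and producer
 * indices; bnx2x_free_tx_pkt() unmaps and frees each skb.
 */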
5935 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5936 {
5937         int i;
5938
5939         for_each_queue(bp, i) {
5940                 struct bnx2x_fastpath *fp = &bp->fp[i];
5941
5942                 u16 bd_cons = fp->tx_bd_cons;
5943                 u16 sw_prod = fp->tx_pkt_prod;
5944                 u16 sw_cons = fp->tx_pkt_cons;
5945
5946                 while (sw_cons != sw_prod) {
5947                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5948                         sw_cons++;
5949                 }
5950         }
5951 }
5952
5953 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5954 {
5955         int i, j;
5956
5957         for_each_queue(bp, j) {
5958                 struct bnx2x_fastpath *fp = &bp->fp[j];
5959
5960                 for (i = 0; i < NUM_RX_BD; i++) {
5961                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5962                         struct sk_buff *skb = rx_buf->skb;
5963
5964                         if (skb == NULL)
5965                                 continue;
5966
5967                         pci_unmap_single(bp->pdev,
5968                                          pci_unmap_addr(rx_buf, mapping),
5969                                          bp->rx_buf_size,
5970                                          PCI_DMA_FROMDEVICE);
5971
5972                         rx_buf->skb = NULL;
5973                         dev_kfree_skb(skb);
5974                 }
5975                 if (!fp->disable_tpa)
5976                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5977                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5978                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5979         }
5980 }
5981
5982 static void bnx2x_free_skbs(struct bnx2x *bp)
5983 {
5984         bnx2x_free_tx_skbs(bp);
5985         bnx2x_free_rx_skbs(bp);
5986 }
5987
5988 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5989 {
5990         int i, offset = 1;
5991
5992         free_irq(bp->msix_table[0].vector, bp->dev);
5993         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5994            bp->msix_table[0].vector);
5995
5996         for_each_queue(bp, i) {
5997                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5998                    "state %x\n", i, bp->msix_table[i + offset].vector,
5999                    bnx2x_fp(bp, i, state));
6000
6001                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6002                         BNX2X_ERR("IRQ of fp #%d being freed while "
6003                                   "state != closed\n", i);
6004
6005                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6006         }
6007 }
6008
6009 static void bnx2x_free_irq(struct bnx2x *bp)
6010 {
6011         if (bp->flags & USING_MSIX_FLAG) {
6012                 bnx2x_free_msix_irqs(bp);
6013                 pci_disable_msix(bp->pdev);
6014                 bp->flags &= ~USING_MSIX_FLAG;
6015
6016         } else
6017                 free_irq(bp->pdev->irq, bp->dev);
6018 }
6019
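/* Request one MSI-X vector for the slowpath plus one per fastpath
 * queue; returns -1 on failure so the caller can fall back to INT#A.
 */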
6020 static int bnx2x_enable_msix(struct bnx2x *bp)
6021 {
6022         int i, rc, offset;
6023
6024         bp->msix_table[0].entry = 0;
6025         offset = 1;
6026         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6027
6028         for_each_queue(bp, i) {
6029                 int igu_vec = offset + i + BP_L_ID(bp);
6030
6031                 bp->msix_table[i + offset].entry = igu_vec;
6032                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6033                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6034         }
6035
6036         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6037                              bp->num_queues + offset);
6038         if (rc) {
6039                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6040                 return -1;
6041         }
6042         bp->flags |= USING_MSIX_FLAG;
6043
6044         return 0;
6045 }
6046
6047 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6048 {
6049         int i, rc, offset = 1;
6050
6051         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6052                          bp->dev->name, bp->dev);
6053         if (rc) {
6054                 BNX2X_ERR("request sp irq failed\n");
6055                 return -EBUSY;
6056         }
6057
6058         for_each_queue(bp, i) {
6059                 rc = request_irq(bp->msix_table[i + offset].vector,
6060                                  bnx2x_msix_fp_int, 0,
6061                                  bp->dev->name, &bp->fp[i]);
6062                 if (rc) {
6063                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6064                                   i + offset, -rc);
6065                         bnx2x_free_msix_irqs(bp);
6066                         return -EBUSY;
6067                 }
6068
6069                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6070         }
6071
6072         return 0;
6073 }
6074
6075 static int bnx2x_req_irq(struct bnx2x *bp)
6076 {
6077         int rc;
6078
6079         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6080                          bp->dev->name, bp->dev);
6081         if (!rc)
6082                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6083
6084         return rc;
6085 }
6086
6087 static void bnx2x_napi_enable(struct bnx2x *bp)
6088 {
6089         int i;
6090
6091         for_each_queue(bp, i)
6092                 napi_enable(&bnx2x_fp(bp, i, napi));
6093 }
6094
6095 static void bnx2x_napi_disable(struct bnx2x *bp)
6096 {
6097         int i;
6098
6099         for_each_queue(bp, i)
6100                 napi_disable(&bnx2x_fp(bp, i, napi));
6101 }
6102
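/* Re-enable interrupts, NAPI and the tx queue once intr_sem drops to
 * zero; paired with bnx2x_netif_stop() below.
 */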
6103 static void bnx2x_netif_start(struct bnx2x *bp)
6104 {
6105         if (atomic_dec_and_test(&bp->intr_sem)) {
6106                 if (netif_running(bp->dev)) {
6107                         if (bp->state == BNX2X_STATE_OPEN)
6108                                 netif_wake_queue(bp->dev);
6109                         bnx2x_napi_enable(bp);
6110                         bnx2x_int_enable(bp);
6111                 }
6112         }
6113 }
6114
6115 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6116 {
6117         bnx2x_int_disable_sync(bp, disable_hw);
6118         if (netif_running(bp->dev)) {
6119                 bnx2x_napi_disable(bp);
6120                 netif_tx_disable(bp->dev);
6121                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6122         }
6123 }
6124
6125 /*
6126  * Init service functions
6127  */
6128
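/* Program (set != 0) or invalidate the primary and broadcast CAM
 * entries for E1 via a SET_MAC ramrod.
 */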
6129 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6130 {
6131         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6132         int port = BP_PORT(bp);
6133
6134         /* CAM allocation
6135          * unicasts 0-31:port0 32-63:port1
6136          * multicast 64-127:port0 128-191:port1
6137          */
6138         config->hdr.length_6b = 2;
6139         config->hdr.offset = port ? 32 : 0;
6140         config->hdr.client_id = BP_CL_ID(bp);
6141         config->hdr.reserved1 = 0;
6142
6143         /* primary MAC */
6144         config->config_table[0].cam_entry.msb_mac_addr =
6145                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6146         config->config_table[0].cam_entry.middle_mac_addr =
6147                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6148         config->config_table[0].cam_entry.lsb_mac_addr =
6149                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6150         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6151         if (set)
6152                 config->config_table[0].target_table_entry.flags = 0;
6153         else
6154                 CAM_INVALIDATE(config->config_table[0]);
6155         config->config_table[0].target_table_entry.client_id = 0;
6156         config->config_table[0].target_table_entry.vlan_id = 0;
6157
6158         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6159            (set ? "setting" : "clearing"),
6160            config->config_table[0].cam_entry.msb_mac_addr,
6161            config->config_table[0].cam_entry.middle_mac_addr,
6162            config->config_table[0].cam_entry.lsb_mac_addr);
6163
6164         /* broadcast */
6165         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6166         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6167         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6168         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6169         if (set)
6170                 config->config_table[1].target_table_entry.flags =
6171                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6172         else
6173                 CAM_INVALIDATE(config->config_table[1]);
6174         config->config_table[1].target_table_entry.client_id = 0;
6175         config->config_table[1].target_table_entry.vlan_id = 0;
6176
6177         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6178                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6179                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6180 }
6181
6182 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6183 {
6184         struct mac_configuration_cmd_e1h *config =
6185                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6186
6187         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6188                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6189                 return;
6190         }
6191
6192         /* CAM allocation for E1H
6193          * unicasts: by func number
6194          * multicast: 20+FUNC*20, 20 each
6195          */
6196         config->hdr.length_6b = 1;
6197         config->hdr.offset = BP_FUNC(bp);
6198         config->hdr.client_id = BP_CL_ID(bp);
6199         config->hdr.reserved1 = 0;
6200
6201         /* primary MAC */
6202         config->config_table[0].msb_mac_addr =
6203                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6204         config->config_table[0].middle_mac_addr =
6205                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6206         config->config_table[0].lsb_mac_addr =
6207                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6208         config->config_table[0].client_id = BP_L_ID(bp);
6209         config->config_table[0].vlan_id = 0;
6210         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6211         if (set)
6212                 config->config_table[0].flags = BP_PORT(bp);
6213         else
6214                 config->config_table[0].flags =
6215                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6216
6217         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6218            (set ? "setting" : "clearing"),
6219            config->config_table[0].msb_mac_addr,
6220            config->config_table[0].middle_mac_addr,
6221            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6222
6223         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6224                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6225                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6226 }
6227
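/* Poll or sleep (up to ~500ms) until *state_p reaches the requested
 * state; the state itself is updated by bnx2x_sp_event() when the
 * ramrod completion arrives.
 */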
6228 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6229                              int *state_p, int poll)
6230 {
6231         /* can take a while if any port is running */
6232         int cnt = 500;
6233
6234         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6235            poll ? "polling" : "waiting", state, idx);
6236
6237         might_sleep();
6238         while (cnt--) {
6239                 if (poll) {
6240                         bnx2x_rx_int(bp->fp, 10);
6241                         /* if index is different from 0
6242                          * the reply for some commands will
6243                          * be on the non default queue
6244                          */
6245                         if (idx)
6246                                 bnx2x_rx_int(&bp->fp[idx], 10);
6247                 }
6248
6249                 mb(); /* state is changed by bnx2x_sp_event() */
6250                 if (*state_p == state)
6251                         return 0;
6252
6253                 msleep(1);
6254         }
6255
6256         /* timeout! */
6257         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6258                   poll ? "polling" : "waiting", state, idx);
6259 #ifdef BNX2X_STOP_ON_ERROR
6260         bnx2x_panic();
6261 #endif
6262
6263         return -EBUSY;
6264 }
6265
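/* Bring up the leading (default) connection with a PORT_SETUP ramrod
 * and wait until the driver state becomes OPEN.
 */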
6266 static int bnx2x_setup_leading(struct bnx2x *bp)
6267 {
6268         int rc;
6269
6270         /* reset IGU state */
6271         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6272
6273         /* SETUP ramrod */
6274         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6275
6276         /* Wait for completion */
6277         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6278
6279         return rc;
6280 }
6281
6282 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6283 {
6284         /* reset IGU state */
6285         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6286
6287         /* SETUP ramrod */
6288         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6289         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6290
6291         /* Wait for completion */
6292         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6293                                  &(bp->fp[index].state), 0);
6294 }
6295
6296 static int bnx2x_poll(struct napi_struct *napi, int budget);
6297 static void bnx2x_set_rx_mode(struct net_device *dev);
6298
6299 /* must be called with rtnl_lock */
6300 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6301 {
6302         u32 load_code;
6303         int i, rc;
6304 #ifdef BNX2X_STOP_ON_ERROR
6305         if (unlikely(bp->panic))
6306                 return -EPERM;
6307 #endif
6308
6309         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6310
6311         /* Send LOAD_REQUEST command to MCP.
6312            Returns the type of LOAD command:
6313            if this is the first port to be initialized,
6314            common blocks should be initialized, otherwise not
6315         */
6316         if (!BP_NOMCP(bp)) {
6317                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6318                 if (!load_code) {
6319                         BNX2X_ERR("MCP response failure, aborting\n");
6320                         return -EBUSY;
6321                 }
6322                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6323                         return -EBUSY; /* other port in diagnostic mode */
6324
6325         } else {
6326                 int port = BP_PORT(bp);
6327
6328                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6329                    load_count[0], load_count[1], load_count[2]);
6330                 load_count[0]++;
6331                 load_count[1 + port]++;
6332                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6333                    load_count[0], load_count[1], load_count[2]);
6334                 if (load_count[0] == 1)
6335                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6336                 else if (load_count[1 + port] == 1)
6337                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6338                 else
6339                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6340         }
6341
6342         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6343             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6344                 bp->port.pmf = 1;
6345         else
6346                 bp->port.pmf = 0;
6347         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6348
6349         /* if we can't use MSI-X we only need one fp,
6350          * so try to enable MSI-X with the requested number of fp's
6351          * and fall back to INT#A with one fp
6352          */
6353         if (use_inta) {
6354                 bp->num_queues = 1;
6355
6356         } else {
6357                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6358                         /* user requested number */
6359                         bp->num_queues = use_multi;
6360
6361                 else if (use_multi)
6362                         bp->num_queues = min_t(u32, num_online_cpus(),
6363                                                BP_MAX_QUEUES(bp));
6364                 else
6365                         bp->num_queues = 1;
6366
6367                 if (bnx2x_enable_msix(bp)) {
6368                         /* failed to enable MSI-X */
6369                         bp->num_queues = 1;
6370                         if (use_multi)
6371                                 BNX2X_ERR("Multi requested but failed"
6372                                           " to enable MSI-X\n");
6373                 }
6374         }
6375         DP(NETIF_MSG_IFUP,
6376            "set number of queues to %d\n", bp->num_queues);
6377
6378         if (bnx2x_alloc_mem(bp))
6379                 return -ENOMEM;
6380
6381         for_each_queue(bp, i)
6382                 bnx2x_fp(bp, i, disable_tpa) =
6383                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6384
6385         if (bp->flags & USING_MSIX_FLAG) {
6386                 rc = bnx2x_req_msix_irqs(bp);
6387                 if (rc) {
6388                         pci_disable_msix(bp->pdev);
6389                         goto load_error;
6390                 }
6391         } else {
6392                 bnx2x_ack_int(bp);
6393                 rc = bnx2x_req_irq(bp);
6394                 if (rc) {
6395                         BNX2X_ERR("IRQ request failed, aborting\n");
6396                         goto load_error;
6397                 }
6398         }
6399
6400         for_each_queue(bp, i)
6401                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6402                                bnx2x_poll, 128);
6403
6404         /* Initialize HW */
6405         rc = bnx2x_init_hw(bp, load_code);
6406         if (rc) {
6407                 BNX2X_ERR("HW init failed, aborting\n");
6408                 goto load_int_disable;
6409         }
6410
6411         /* Setup NIC internals and enable interrupts */
6412         bnx2x_nic_init(bp, load_code);
6413
6414         /* Send LOAD_DONE command to MCP */
6415         if (!BP_NOMCP(bp)) {
6416                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6417                 if (!load_code) {
6418                         BNX2X_ERR("MCP response failure, aborting\n");
6419                         rc = -EBUSY;
6420                         goto load_rings_free;
6421                 }
6422         }
6423
6424         bnx2x_stats_init(bp);
6425
6426         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6427
6428         /* Enable Rx interrupt handling before sending the ramrod
6429            as it's completed on Rx FP queue */
6430         bnx2x_napi_enable(bp);
6431
6432         /* Enable interrupt handling */
6433         atomic_set(&bp->intr_sem, 0);
6434
6435         rc = bnx2x_setup_leading(bp);
6436         if (rc) {
6437                 BNX2X_ERR("Setup leading failed!\n");
6438                 goto load_netif_stop;
6439         }
6440
6441         if (CHIP_IS_E1H(bp))
6442                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6443                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6444                         bp->state = BNX2X_STATE_DISABLED;
6445                 }
6446
6447         if (bp->state == BNX2X_STATE_OPEN)
6448                 for_each_nondefault_queue(bp, i) {
6449                         rc = bnx2x_setup_multi(bp, i);
6450                         if (rc)
6451                                 goto load_netif_stop;
6452                 }
6453
6454         if (CHIP_IS_E1(bp))
6455                 bnx2x_set_mac_addr_e1(bp, 1);
6456         else
6457                 bnx2x_set_mac_addr_e1h(bp, 1);
6458
6459         if (bp->port.pmf)
6460                 bnx2x_initial_phy_init(bp);
6461
6462         /* Start fast path */
6463         switch (load_mode) {
6464         case LOAD_NORMAL:
6465                 /* Tx queue should only be re-enabled */
6466                 netif_wake_queue(bp->dev);
6467                 bnx2x_set_rx_mode(bp->dev);
6468                 break;
6469
6470         case LOAD_OPEN:
6471                 netif_start_queue(bp->dev);
6472                 bnx2x_set_rx_mode(bp->dev);
6473                 if (bp->flags & USING_MSIX_FLAG)
6474                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6475                                bp->dev->name);
6476                 break;
6477
6478         case LOAD_DIAG:
6479                 bnx2x_set_rx_mode(bp->dev);
6480                 bp->state = BNX2X_STATE_DIAG;
6481                 break;
6482
6483         default:
6484                 break;
6485         }
6486
6487         if (!bp->port.pmf)
6488                 bnx2x__link_status_update(bp);
6489
6490         /* start the timer */
6491         mod_timer(&bp->timer, jiffies + bp->current_interval);
6492
6493
6494         return 0;
6495
6496 load_netif_stop:
6497         bnx2x_napi_disable(bp);
6498 load_rings_free:
6499         /* Free SKBs, SGEs, TPA pool and driver internals */
6500         bnx2x_free_skbs(bp);
6501         for_each_queue(bp, i)
6502                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6503 load_int_disable:
6504         bnx2x_int_disable_sync(bp, 1);
6505         /* Release IRQs */
6506         bnx2x_free_irq(bp);
6507 load_error:
6508         bnx2x_free_mem(bp);
6509         bp->port.pmf = 0;
6510
6511         /* TBD we really need to reset the chip
6512            if we want to recover from this */
6513         return rc;
6514 }
6515
6516 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6517 {
6518         int rc;
6519
6520         /* halt the connection */
6521         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6522         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6523
6524         /* Wait for completion */
6525         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6526                                &(bp->fp[index].state), 1);
6527         if (rc) /* timeout */
6528                 return rc;
6529
6530         /* delete cfc entry */
6531         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6532
6533         /* Wait for completion */
6534         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6535                                &(bp->fp[index].state), 1);
6536         return rc;
6537 }
6538
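/* Tear down the leading connection: HALT ramrod first, then
 * PORT_DELETE, whose completion shows up as a default status block
 * producer update.
 */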
6539 static int bnx2x_stop_leading(struct bnx2x *bp)
6540 {
6541         u16 dsb_sp_prod_idx;
6542         /* if the other port is handling traffic,
6543            this can take a lot of time */
6544         int cnt = 500;
6545         int rc;
6546
6547         might_sleep();
6548
6549         /* Send HALT ramrod */
6550         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6551         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6552
6553         /* Wait for completion */
6554         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6555                                &(bp->fp[0].state), 1);
6556         if (rc) /* timeout */
6557                 return rc;
6558
6559         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6560
6561         /* Send PORT_DELETE ramrod */
6562         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6563
6564         /* Wait for completion to arrive on the default status block;
6565            we are going to reset the chip anyway,
6566            so there is not much to do if this times out
6567          */
6568         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6569                 if (!cnt) {
6570                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6571                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6572                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6573 #ifdef BNX2X_STOP_ON_ERROR
6574                         bnx2x_panic();
6575 #else
6576                         rc = -EBUSY;
6577 #endif
6578                         break;
6579                 }
6580                 cnt--;
6581                 msleep(1);
6582         }
6583         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6584         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6585
6586         return rc;
6587 }
6588
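/* The three reset helpers below undo, respectively, the function,
 * port and common initialization done at load time.
 */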
6589 static void bnx2x_reset_func(struct bnx2x *bp)
6590 {
6591         int port = BP_PORT(bp);
6592         int func = BP_FUNC(bp);
6593         int base, i;
6594
6595         /* Configure IGU */
6596         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6597         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6598
6599         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6600
6601         /* Clear ILT */
6602         base = FUNC_ILT_BASE(func);
6603         for (i = base; i < base + ILT_PER_FUNC; i++)
6604                 bnx2x_ilt_wr(bp, i, 0);
6605 }
6606
6607 static void bnx2x_reset_port(struct bnx2x *bp)
6608 {
6609         int port = BP_PORT(bp);
6610         u32 val;
6611
6612         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6613
6614         /* Do not rcv packets to BRB */
6615         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6616         /* Do not direct rcv packets that are not for MCP to the BRB */
6617         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6618                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6619
6620         /* Configure AEU */
6621         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6622
6623         msleep(100);
6624         /* Check for BRB port occupancy */
6625         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6626         if (val)
6627                 DP(NETIF_MSG_IFDOWN,
6628                    "BRB1 is not empty  %d blocks are occupied\n", val);
6629
6630         /* TODO: Close Doorbell port? */
6631 }
6632
6633 static void bnx2x_reset_common(struct bnx2x *bp)
6634 {
6635         /* reset_common */
6636         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6637                0xd3ffff7f);
6638         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6639 }
6640
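/* Dispatch on the MCP reset code: COMMON implies PORT and FUNCTION,
 * PORT implies FUNCTION (mirroring the load-time fall-through).
 */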
6641 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6642 {
6643         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6644            BP_FUNC(bp), reset_code);
6645
6646         switch (reset_code) {
6647         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6648                 bnx2x_reset_port(bp);
6649                 bnx2x_reset_func(bp);
6650                 bnx2x_reset_common(bp);
6651                 break;
6652
6653         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6654                 bnx2x_reset_port(bp);
6655                 bnx2x_reset_func(bp);
6656                 break;
6657
6658         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6659                 bnx2x_reset_func(bp);
6660                 break;
6661
6662         default:
6663                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6664                 break;
6665         }
6666 }
6667
6668 /* must be called with rtnl_lock */
6669 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6670 {
6671         int port = BP_PORT(bp);
6672         u32 reset_code = 0;
6673         int i, cnt, rc;
6674
6675         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6676
6677         bp->rx_mode = BNX2X_RX_MODE_NONE;
6678         bnx2x_set_storm_rx_mode(bp);
6679
6680         bnx2x_netif_stop(bp, 1);
6681         if (!netif_running(bp->dev))
6682                 bnx2x_napi_disable(bp);
6683         del_timer_sync(&bp->timer);
6684         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6685                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6686         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6687
6688         /* Wait until tx fast path tasks complete */
6689         for_each_queue(bp, i) {
6690                 struct bnx2x_fastpath *fp = &bp->fp[i];
6691
6692                 cnt = 1000;
6693                 smp_rmb();
6694                 while (BNX2X_HAS_TX_WORK(fp)) {
6695
6696                         bnx2x_tx_int(fp, 1000);
6697                         if (!cnt) {
6698                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6699                                           i);
6700 #ifdef BNX2X_STOP_ON_ERROR
6701                                 bnx2x_panic();
6702                                 return -EBUSY;
6703 #else
6704                                 break;
6705 #endif
6706                         }
6707                         cnt--;
6708                         msleep(1);
6709                         smp_rmb();
6710                 }
6711         }
6712         /* Give HW time to discard old tx messages */
6713         msleep(1);
6714
6715         /* Release IRQs */
6716         bnx2x_free_irq(bp);
6717
6718         if (CHIP_IS_E1(bp)) {
6719                 struct mac_configuration_cmd *config =
6720                                                 bnx2x_sp(bp, mcast_config);
6721
6722                 bnx2x_set_mac_addr_e1(bp, 0);
6723
6724                 for (i = 0; i < config->hdr.length_6b; i++)
6725                         CAM_INVALIDATE(config->config_table[i]);
6726
6727                 config->hdr.length_6b = i;
6728                 if (CHIP_REV_IS_SLOW(bp))
6729                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6730                 else
6731                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6732                 config->hdr.client_id = BP_CL_ID(bp);
6733                 config->hdr.reserved1 = 0;
6734
6735                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6736                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6737                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6738
6739         } else { /* E1H */
6740                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6741
6742                 bnx2x_set_mac_addr_e1h(bp, 0);
6743
6744                 for (i = 0; i < MC_HASH_SIZE; i++)
6745                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6746         }
6747
6748         if (unload_mode == UNLOAD_NORMAL)
6749                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6750
6751         else if (bp->flags & NO_WOL_FLAG) {
6752                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6753                 if (CHIP_IS_E1H(bp))
6754                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6755
6756         } else if (bp->wol) {
6757                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6758                 u8 *mac_addr = bp->dev->dev_addr;
6759                 u32 val;
6760                 /* The mac address is written to entries 1-4 to
6761                    preserve entry 0 which is used by the PMF */
6762                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6763
6764                 val = (mac_addr[0] << 8) | mac_addr[1];
6765                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6766
6767                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6768                       (mac_addr[4] << 8) | mac_addr[5];
6769                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6770
6771                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6772
6773         } else
6774                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6775
6776         /* Close multi and leading connections;
6777            completions for ramrods are collected synchronously */
6778         for_each_nondefault_queue(bp, i)
6779                 if (bnx2x_stop_multi(bp, i))
6780                         goto unload_error;
6781
6782         rc = bnx2x_stop_leading(bp);
6783         if (rc) {
6784                 BNX2X_ERR("Stop leading failed!\n");
6785 #ifdef BNX2X_STOP_ON_ERROR
6786                 return -EBUSY;
6787 #else
6788                 goto unload_error;
6789 #endif
6790         }
6791
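        /* note: the success path also falls through to unload_error; the
         * label only marks where ramrod failures rejoin the common
         * reset/unload sequence below */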
6792 unload_error:
6793         if (!BP_NOMCP(bp))
6794                 reset_code = bnx2x_fw_command(bp, reset_code);
6795         else {
6796                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6797                    load_count[0], load_count[1], load_count[2]);
6798                 load_count[0]--;
6799                 load_count[1 + port]--;
6800                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6801                    load_count[0], load_count[1], load_count[2]);
6802                 if (load_count[0] == 0)
6803                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6804                 else if (load_count[1 + port] == 0)
6805                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6806                 else
6807                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6808         }
6809
6810         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6811             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6812                 bnx2x__link_reset(bp);
6813
6814         /* Reset the chip */
6815         bnx2x_reset_chip(bp, reset_code);
6816
6817         /* Report UNLOAD_DONE to MCP */
6818         if (!BP_NOMCP(bp))
6819                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6820         bp->port.pmf = 0;
6821
6822         /* Free SKBs, SGEs, TPA pool and driver internals */
6823         bnx2x_free_skbs(bp);
6824         for_each_queue(bp, i)
6825                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6826         bnx2x_free_mem(bp);
6827
6828         bp->state = BNX2X_STATE_CLOSED;
6829
6830         netif_carrier_off(bp->dev);
6831
6832         return 0;
6833 }
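
/* Editor's note: summary of the unload order implemented above: quiesce
 * rx mode, napi and the timer, drain the tx queues, free IRQs, remove
 * MAC/CAM entries, choose a WOL-dependent unload request code, close the
 * non-leading and then the leading connection via ramrods, negotiate the
 * reset code with the MCP (or fall back to load_count bookkeeping when
 * no MCP is present), reset link and chip, report UNLOAD_DONE and free
 * driver resources.
 */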
6834
6835 static void bnx2x_reset_task(struct work_struct *work)
6836 {
6837         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6838
6839 #ifdef BNX2X_STOP_ON_ERROR
6840         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6841                   " so reset is skipped to allow a debug dump;\n"
6842          KERN_ERR " you will need to reboot when done\n");
6843         return;
6844 #endif
6845
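        /* rtnl_lock serializes this reload against concurrent
         * open/close and other netdev operations */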
6846         rtnl_lock();
6847
6848         if (!netif_running(bp->dev))
6849                 goto reset_task_exit;
6850
6851         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6852         bnx2x_nic_load(bp, LOAD_NORMAL);
6853
6854 reset_task_exit:
6855         rtnl_unlock();
6856 }
6857
6858 /* end of nic load/unload */
6859
6860 /* ethtool_ops */
6861
6862 /*
6863  * Init service functions
6864  */
6865
6866 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6867 {
6868         u32 val;
6869
6870         /* Check if there is any driver already loaded */
6871         val = REG_RD(bp, MISC_REG_UNPREPARED);
6872         if (val == 0x1) {
6873                 /* Check if it is the UNDI driver:
6874                  * UNDI initializes the CID offset for the normal doorbell to 0x7
6875                  */
6876                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6877                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6878                 if (val == 0x7)
6879                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6880                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6881
6882                 if (val == 0x7) {
6883                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6884                         /* save our func */
6885                         int func = BP_FUNC(bp);
6886                         u32 swap_en;
6887                         u32 swap_val;
6888
6889                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6890
6891                         /* try unload UNDI on port 0 */
6892                         bp->func = 0;
6893                         bp->fw_seq =
6894                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6895                                 DRV_MSG_SEQ_NUMBER_MASK);
6896                         reset_code = bnx2x_fw_command(bp, reset_code);
6897
6898                         /* if UNDI is loaded on the other port */
6899                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6900
6901                                 /* send "DONE" for previous unload */
6902                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6903
6904                                 /* unload UNDI on port 1 */
6905                                 bp->func = 1;
6906                                 bp->fw_seq =
6907                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6908                                         DRV_MSG_SEQ_NUMBER_MASK);
6909                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6910
6911                                 bnx2x_fw_command(bp, reset_code);
6912                         }
6913
6914                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6915                                     HC_REG_CONFIG_0), 0x1000);
6916
6917                         /* close input traffic and wait for it */
6918                         /* Do not rcv packets to BRB */
6919                         REG_WR(bp,
6920                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6921                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6922                         /* Do not direct rcv packets that are not for MCP to
6923                          * the BRB */
6924                         REG_WR(bp,
6925                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6926                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6927                         /* clear AEU */
6928                         REG_WR(bp,
6929                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6930                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6931                         msleep(10);
6932
6933                         /* save NIG port swap info */
6934                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6935                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6936                         /* reset device */
6937                         REG_WR(bp,
6938                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6939                                0xd3ffffff);
6940                         REG_WR(bp,
6941                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6942                                0x1403);
6943                         /* take the NIG out of reset and restore swap values */
6944                         REG_WR(bp,
6945                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6946                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6947                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6948                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6949
6950                         /* send unload done to the MCP */
6951                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6952
6953                         /* restore our func and fw_seq */
6954                         bp->func = func;
6955                         bp->fw_seq =
6956                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6957                                 DRV_MSG_SEQ_NUMBER_MASK);
6958                 }
6959         }
6960 }
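
/* Editor's note: a minimal sketch (function name hypothetical, not part
 * of the driver) of the detection idiom used above: sample the doorbell
 * CID offset under the HW lock and treat the UNDI magic value 0x7 as
 * evidence that the boot-time driver still owns the device.
 */
#if 0
static int bnx2x_undi_active(struct bnx2x *bp)
{
        u32 val;

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
        val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

        return (val == 0x7);
}
#endif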
6961
6962 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6963 {
6964         u32 val, val2, val3, val4, id;
6965         u16 pmc;
6966
6967         /* Get the chip revision id and number. */
6968         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6969         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6970         id = ((val & 0xffff) << 16);
6971         val = REG_RD(bp, MISC_REG_CHIP_REV);
6972         id |= ((val & 0xf) << 12);
6973         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6974         id |= ((val & 0xff) << 4);
6975         val = REG_RD(bp, MISC_REG_BOND_ID);
6976         id |= (val & 0xf);
6977         bp->common.chip_id = id;
6978         bp->link_params.chip_id = bp->common.chip_id;
6979         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6980
6981         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6982         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6983                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6984         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6985                        bp->common.flash_size, bp->common.flash_size);
6986
6987         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6988         bp->link_params.shmem_base = bp->common.shmem_base;
6989         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6990
6991         if (!bp->common.shmem_base ||
6992             (bp->common.shmem_base < 0xA0000) ||
6993             (bp->common.shmem_base >= 0xC0000)) {
6994                 BNX2X_DEV_INFO("MCP not active\n");
6995                 bp->flags |= NO_MCP_FLAG;
6996                 return;
6997         }
6998
6999         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7000         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7001                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7002                 BNX2X_ERR("BAD MCP validity signature\n");
7003
7004         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7005         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7006
7007         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7008                        bp->common.hw_config, bp->common.board);
7009
7010         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7011                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7012                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7013
7014         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7015         bp->common.bc_ver = val;
7016         BNX2X_DEV_INFO("bc_ver %X\n", val);
7017         if (val < BNX2X_BC_VER) {
7018                 /* for now only warn;
7019                  * later we might need to enforce this */
7020                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7021                           " please upgrade BC\n", BNX2X_BC_VER, val);
7022         }
7023
7024         if (BP_E1HVN(bp) == 0) {
7025                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7026                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7027         } else {
7028                 /* no WOL capability for E1HVN != 0 */
7029                 bp->flags |= NO_WOL_FLAG;
7030         }
7031         BNX2X_DEV_INFO("%sWoL capable\n",
7032                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7033
7034         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7035         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7036         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7037         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7038
7039         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7040                val, val2, val3, val4);
7041 }
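
/* Editor's note: standalone sketch (not driver code) showing how the
 * composite chip_id assembled above decomposes; the field layout follows
 * the comment in bnx2x_get_common_hwinfo(): num 16-31, rev 12-15,
 * metal 4-11, bond_id 0-3.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void decode_chip_id(uint32_t id)
{
        printf("num 0x%04x  rev 0x%x  metal 0x%02x  bond 0x%x\n",
               (unsigned)((id >> 16) & 0xffff), (unsigned)((id >> 12) & 0xf),
               (unsigned)((id >> 4) & 0xff), (unsigned)(id & 0xf));
}
#endif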
7042
7043 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7044                                                     u32 switch_cfg)
7045 {
7046         int port = BP_PORT(bp);
7047         u32 ext_phy_type;
7048
7049         switch (switch_cfg) {
7050         case SWITCH_CFG_1G:
7051                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7052
7053                 ext_phy_type =
7054                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7055                 switch (ext_phy_type) {
7056                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7057                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7058                                        ext_phy_type);
7059
7060                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7061                                                SUPPORTED_10baseT_Full |
7062                                                SUPPORTED_100baseT_Half |
7063                                                SUPPORTED_100baseT_Full |
7064                                                SUPPORTED_1000baseT_Full |
7065                                                SUPPORTED_2500baseX_Full |
7066                                                SUPPORTED_TP |
7067                                                SUPPORTED_FIBRE |
7068                                                SUPPORTED_Autoneg |
7069                                                SUPPORTED_Pause |
7070                                                SUPPORTED_Asym_Pause);
7071                         break;
7072
7073                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7074                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7075                                        ext_phy_type);
7076
7077                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7078                                                SUPPORTED_10baseT_Full |
7079                                                SUPPORTED_100baseT_Half |
7080                                                SUPPORTED_100baseT_Full |
7081                                                SUPPORTED_1000baseT_Full |
7082                                                SUPPORTED_TP |
7083                                                SUPPORTED_FIBRE |
7084                                                SUPPORTED_Autoneg |
7085                                                SUPPORTED_Pause |
7086                                                SUPPORTED_Asym_Pause);
7087                         break;
7088
7089                 default:
7090                         BNX2X_ERR("NVRAM config error. "
7091                                   "BAD SerDes ext_phy_config 0x%x\n",
7092                                   bp->link_params.ext_phy_config);
7093                         return;
7094                 }
7095
7096                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7097                                            port*0x10);
7098                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7099                 break;
7100
7101         case SWITCH_CFG_10G:
7102                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7103
7104                 ext_phy_type =
7105                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7106                 switch (ext_phy_type) {
7107                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7108                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7109                                        ext_phy_type);
7110
7111                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7112                                                SUPPORTED_10baseT_Full |
7113                                                SUPPORTED_100baseT_Half |
7114                                                SUPPORTED_100baseT_Full |
7115                                                SUPPORTED_1000baseT_Full |
7116                                                SUPPORTED_2500baseX_Full |
7117                                                SUPPORTED_10000baseT_Full |
7118                                                SUPPORTED_TP |
7119                                                SUPPORTED_FIBRE |
7120                                                SUPPORTED_Autoneg |
7121                                                SUPPORTED_Pause |
7122                                                SUPPORTED_Asym_Pause);
7123                         break;
7124
7125                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7126                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7127                                        ext_phy_type);
7128
7129                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7130                                                SUPPORTED_FIBRE |
7131                                                SUPPORTED_Pause |
7132                                                SUPPORTED_Asym_Pause);
7133                         break;
7134
7135                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7136                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7137                                        ext_phy_type);
7138
7139                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7140                                                SUPPORTED_1000baseT_Full |
7141                                                SUPPORTED_FIBRE |
7142                                                SUPPORTED_Pause |
7143                                                SUPPORTED_Asym_Pause);
7144                         break;
7145
7146                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7147                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7148                                        ext_phy_type);
7149
7150                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7151                                                SUPPORTED_1000baseT_Full |
7152                                                SUPPORTED_FIBRE |
7153                                                SUPPORTED_Autoneg |
7154                                                SUPPORTED_Pause |
7155                                                SUPPORTED_Asym_Pause);
7156                         break;
7157
7158                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7159                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7160                                        ext_phy_type);
7161
7162                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7163                                                SUPPORTED_2500baseX_Full |
7164                                                SUPPORTED_1000baseT_Full |
7165                                                SUPPORTED_FIBRE |
7166                                                SUPPORTED_Autoneg |
7167                                                SUPPORTED_Pause |
7168                                                SUPPORTED_Asym_Pause);
7169                         break;
7170
7171                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7172                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7173                                        ext_phy_type);
7174
7175                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7176                                                SUPPORTED_TP |
7177                                                SUPPORTED_Autoneg |
7178                                                SUPPORTED_Pause |
7179                                                SUPPORTED_Asym_Pause);
7180                         break;
7181
7182                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7183                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7184                                   bp->link_params.ext_phy_config);
7185                         break;
7186
7187                 default:
7188                         BNX2X_ERR("NVRAM config error. "
7189                                   "BAD XGXS ext_phy_config 0x%x\n",
7190                                   bp->link_params.ext_phy_config);
7191                         return;
7192                 }
7193
7194                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7195                                            port*0x18);
7196                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7197
7198                 break;
7199
7200         default:
7201                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7202                           bp->port.link_config);
7203                 return;
7204         }
7205         bp->link_params.phy_addr = bp->port.phy_addr;
7206
7207         /* mask what we support according to speed_cap_mask */
7208         if (!(bp->link_params.speed_cap_mask &
7209                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7210                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7211
7212         if (!(bp->link_params.speed_cap_mask &
7213                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7214                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7215
7216         if (!(bp->link_params.speed_cap_mask &
7217                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7218                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7219
7220         if (!(bp->link_params.speed_cap_mask &
7221                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7222                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7223
7224         if (!(bp->link_params.speed_cap_mask &
7225                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7226                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7227                                         SUPPORTED_1000baseT_Full);
7228
7229         if (!(bp->link_params.speed_cap_mask &
7230                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7231                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7232
7233         if (!(bp->link_params.speed_cap_mask &
7234                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7235                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7236
7237         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7238 }
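
/* Editor's note: net effect of the function above: bp->port.supported
 * starts from the capability set of the detected SerDes/XGXS external
 * PHY, then speeds disabled in the NVRAM speed_cap_mask are cleared, so
 * NVRAM policy can only restrict, never extend, what the PHY offers.
 */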
7239
7240 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7241 {
7242         bp->link_params.req_duplex = DUPLEX_FULL;
7243
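        /* each forced-speed case below follows one template: verify the
         * speed is present in bp->port.supported, else report an NVRAM
         * config error and return without applying the request */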
7244         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7245         case PORT_FEATURE_LINK_SPEED_AUTO:
7246                 if (bp->port.supported & SUPPORTED_Autoneg) {
7247                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7248                         bp->port.advertising = bp->port.supported;
7249                 } else {
7250                         u32 ext_phy_type =
7251                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7252
7253                         if ((ext_phy_type ==
7254                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7255                             (ext_phy_type ==
7256                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7257                                 /* force 10G, no AN */
7258                                 bp->link_params.req_line_speed = SPEED_10000;
7259                                 bp->port.advertising =
7260                                                 (ADVERTISED_10000baseT_Full |
7261                                                  ADVERTISED_FIBRE);
7262                                 break;
7263                         }
7264                         BNX2X_ERR("NVRAM config error. "
7265                                   "Invalid link_config 0x%x"
7266                                   "  Autoneg not supported\n",
7267                                   bp->port.link_config);
7268                         return;
7269                 }
7270                 break;
7271
7272         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7273                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7274                         bp->link_params.req_line_speed = SPEED_10;
7275                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7276                                                 ADVERTISED_TP);
7277                 } else {
7278                         BNX2X_ERR("NVRAM config error. "
7279                                   "Invalid link_config 0x%x"
7280                                   "  speed_cap_mask 0x%x\n",
7281                                   bp->port.link_config,
7282                                   bp->link_params.speed_cap_mask);
7283                         return;
7284                 }
7285                 break;
7286
7287         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7288                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7289                         bp->link_params.req_line_speed = SPEED_10;
7290                         bp->link_params.req_duplex = DUPLEX_HALF;
7291                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7292                                                 ADVERTISED_TP);
7293                 } else {
7294                         BNX2X_ERR("NVRAM config error. "
7295                                   "Invalid link_config 0x%x"
7296                                   "  speed_cap_mask 0x%x\n",
7297                                   bp->port.link_config,
7298                                   bp->link_params.speed_cap_mask);
7299                         return;
7300                 }
7301                 break;
7302
7303         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7304                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7305                         bp->link_params.req_line_speed = SPEED_100;
7306                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7307                                                 ADVERTISED_TP);
7308                 } else {
7309                         BNX2X_ERR("NVRAM config error. "
7310                                   "Invalid link_config 0x%x"
7311                                   "  speed_cap_mask 0x%x\n",
7312                                   bp->port.link_config,
7313                                   bp->link_params.speed_cap_mask);
7314                         return;
7315                 }
7316                 break;
7317
7318         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7319                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7320                         bp->link_params.req_line_speed = SPEED_100;
7321                         bp->link_params.req_duplex = DUPLEX_HALF;
7322                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7323                                                 ADVERTISED_TP);
7324                 } else {
7325                         BNX2X_ERR("NVRAM config error. "
7326                                   "Invalid link_config 0x%x"
7327                                   "  speed_cap_mask 0x%x\n",
7328                                   bp->port.link_config,
7329                                   bp->link_params.speed_cap_mask);
7330                         return;
7331                 }
7332                 break;
7333
7334         case PORT_FEATURE_LINK_SPEED_1G:
7335                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7336                         bp->link_params.req_line_speed = SPEED_1000;
7337                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7338                                                 ADVERTISED_TP);
7339                 } else {
7340                         BNX2X_ERR("NVRAM config error. "
7341                                   "Invalid link_config 0x%x"
7342                                   "  speed_cap_mask 0x%x\n",
7343                                   bp->port.link_config,
7344                                   bp->link_params.speed_cap_mask);
7345                         return;
7346                 }
7347                 break;
7348
7349         case PORT_FEATURE_LINK_SPEED_2_5G:
7350                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7351                         bp->link_params.req_line_speed = SPEED_2500;
7352                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7353                                                 ADVERTISED_TP);
7354                 } else {
7355                         BNX2X_ERR("NVRAM config error. "
7356                                   "Invalid link_config 0x%x"
7357                                   "  speed_cap_mask 0x%x\n",
7358                                   bp->port.link_config,
7359                                   bp->link_params.speed_cap_mask);
7360                         return;
7361                 }
7362                 break;
7363
7364         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7365         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7366         case PORT_FEATURE_LINK_SPEED_10G_KR:
7367                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7368                         bp->link_params.req_line_speed = SPEED_10000;
7369                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7370                                                 ADVERTISED_FIBRE);
7371                 } else {
7372                         BNX2X_ERR("NVRAM config error. "
7373                                   "Invalid link_config 0x%x"
7374                                   "  speed_cap_mask 0x%x\n",
7375                                   bp->port.link_config,
7376                                   bp->link_params.speed_cap_mask);
7377                         return;
7378                 }
7379                 break;
7380
7381         default:
7382                 BNX2X_ERR("NVRAM config error. "
7383                           "BAD link speed link_config 0x%x\n",
7384                           bp->port.link_config);
7385                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7386                 bp->port.advertising = bp->port.supported;
7387                 break;
7388         }
7389
7390         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7391                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7392         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7393             !(bp->port.supported & SUPPORTED_Autoneg))
7394                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7395
7396         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7397                        "  advertising 0x%x\n",
7398                        bp->link_params.req_line_speed,
7399                        bp->link_params.req_duplex,
7400                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7401 }
7402
7403 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7404 {
7405         int port = BP_PORT(bp);
7406         u32 val, val2;
7407
7408         bp->link_params.bp = bp;
7409         bp->link_params.port = port;
7410
7411         bp->link_params.serdes_config =
7412                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7413         bp->link_params.lane_config =
7414                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7415         bp->link_params.ext_phy_config =
7416                 SHMEM_RD(bp,
7417                          dev_info.port_hw_config[port].external_phy_config);
7418         bp->link_params.speed_cap_mask =
7419                 SHMEM_RD(bp,
7420                          dev_info.port_hw_config[port].speed_capability_mask);
7421
7422         bp->port.link_config =
7423                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7424
7425         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7426              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7427                        "  link_config 0x%08x\n",
7428                        bp->link_params.serdes_config,
7429                        bp->link_params.lane_config,
7430                        bp->link_params.ext_phy_config,
7431                        bp->link_params.speed_cap_mask, bp->port.link_config);
7432
7433         bp->link_params.switch_cfg = (bp->port.link_config &
7434                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7435         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7436
7437         bnx2x_link_settings_requested(bp);
7438
7439         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7440         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7441         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7442         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7443         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7444         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7445         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7446         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7447         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7448         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7449 }
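
/* Editor's note: standalone sketch (not driver code) of the MAC
 * unpacking done above: the shmem "upper" word carries bytes 0-1 and
 * the "lower" word bytes 2-5, most significant byte first.
 */
#if 0
#include <stdint.h>

static void mac_from_shmem(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
        mac[0] = (uint8_t)(upper >> 8);
        mac[1] = (uint8_t)upper;
        mac[2] = (uint8_t)(lower >> 24);
        mac[3] = (uint8_t)(lower >> 16);
        mac[4] = (uint8_t)(lower >> 8);
        mac[5] = (uint8_t)lower;
}
#endif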
7450
7451 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7452 {
7453         int func = BP_FUNC(bp);
7454         u32 val, val2;
7455         int rc = 0;
7456
7457         bnx2x_get_common_hwinfo(bp);
7458
7459         bp->e1hov = 0;
7460         bp->e1hmf = 0;
7461         if (CHIP_IS_E1H(bp)) {
7462                 bp->mf_config =
7463                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7464
7465                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7466                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7467                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7468
7469                         bp->e1hov = val;
7470                         bp->e1hmf = 1;
7471                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7472                                        "(0x%04x)\n",
7473                                        func, bp->e1hov, bp->e1hov);
7474                 } else {
7475                         BNX2X_DEV_INFO("Single function mode\n");
7476                         if (BP_E1HVN(bp)) {
7477                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7478                                           "  aborting\n", func);
7479                                 rc = -EPERM;
7480                         }
7481                 }
7482         }
7483
7484         if (!BP_NOMCP(bp)) {
7485                 bnx2x_get_port_hwinfo(bp);
7486
7487                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7488                               DRV_MSG_SEQ_NUMBER_MASK);
7489                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7490         }
7491
7492         if (IS_E1HMF(bp)) {
7493                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7494                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7495                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7496                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7497                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7498                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7499                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7500                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7501                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7502                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7503                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7504                                ETH_ALEN);
7505                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7506                                ETH_ALEN);
7507                 }
7508
7509                 return rc;
7510         }
7511
7512         if (BP_NOMCP(bp)) {
7513                 /* only supposed to happen on emulation/FPGA */
7514                 BNX2X_ERR("warning: random MAC workaround active\n");
7515                 random_ether_addr(bp->dev->dev_addr);
7516                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7517         }
7518
7519         return rc;
7520 }
7521
7522 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7523 {
7524         int func = BP_FUNC(bp);
7525         int rc;
7526
7527         /* Disable interrupt handling until HW is initialized */
7528         atomic_set(&bp->intr_sem, 1);
7529
7530         mutex_init(&bp->port.phy_mutex);
7531
7532         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7533         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7534
7535         rc = bnx2x_get_hwinfo(bp);
7536
7537         /* need to reset chip if undi was active */
7538         if (!BP_NOMCP(bp))
7539                 bnx2x_undi_unload(bp);
7540
7541         if (CHIP_REV_IS_FPGA(bp))
7542                 printk(KERN_ERR PFX "FPGA detected\n");
7543
7544         if (BP_NOMCP(bp) && (func == 0))
7545                 printk(KERN_ERR PFX
7546                        "MCP disabled, must load devices in order!\n");
7547
7548         /* Set TPA flags */
7549         if (disable_tpa) {
7550                 bp->flags &= ~TPA_ENABLE_FLAG;
7551                 bp->dev->features &= ~NETIF_F_LRO;
7552         } else {
7553                 bp->flags |= TPA_ENABLE_FLAG;
7554                 bp->dev->features |= NETIF_F_LRO;
7555         }
7556
7557
7558         bp->tx_ring_size = MAX_TX_AVAIL;
7559         bp->rx_ring_size = MAX_RX_AVAIL;
7560
7561         bp->rx_csum = 1;
7562         bp->rx_offset = 0;
7563
7564         bp->tx_ticks = 50;
7565         bp->rx_ticks = 25;
7566
7567         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7568         bp->current_interval = (poll ? poll : bp->timer_interval);
7569
7570         init_timer(&bp->timer);
7571         bp->timer.expires = jiffies + bp->current_interval;
7572         bp->timer.data = (unsigned long) bp;
7573         bp->timer.function = bnx2x_timer;
7574
7575         return rc;
7576 }
7577
7578 /*
7579  * ethtool service functions
7580  */
7581
7582 /* All ethtool functions called with rtnl_lock */
7583
7584 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7585 {
7586         struct bnx2x *bp = netdev_priv(dev);
7587
7588         cmd->supported = bp->port.supported;
7589         cmd->advertising = bp->port.advertising;
7590
7591         if (netif_carrier_ok(dev)) {
7592                 cmd->speed = bp->link_vars.line_speed;
7593                 cmd->duplex = bp->link_vars.duplex;
7594         } else {
7595                 cmd->speed = bp->link_params.req_line_speed;
7596                 cmd->duplex = bp->link_params.req_duplex;
7597         }
7598         if (IS_E1HMF(bp)) {
7599                 u16 vn_max_rate;
7600
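                /* MAX_BW is in 100 Mbps units: e.g. a field value of
                 * 100 caps the reported speed at 10000 Mbps */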
7601                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7602                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7603                 if (vn_max_rate < cmd->speed)
7604                         cmd->speed = vn_max_rate;
7605         }
7606
7607         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7608                 u32 ext_phy_type =
7609                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7610
7611                 switch (ext_phy_type) {
7612                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7613                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7614                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7615                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7616                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7617                         cmd->port = PORT_FIBRE;
7618                         break;
7619
7620                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7621                         cmd->port = PORT_TP;
7622                         break;
7623
7624                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7625                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7626                                   bp->link_params.ext_phy_config);
7627                         break;
7628
7629                 default:
7630                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7631                            bp->link_params.ext_phy_config);
7632                         break;
7633                 }
7634         } else
7635                 cmd->port = PORT_TP;
7636
7637         cmd->phy_address = bp->port.phy_addr;
7638         cmd->transceiver = XCVR_INTERNAL;
7639
7640         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7641                 cmd->autoneg = AUTONEG_ENABLE;
7642         else
7643                 cmd->autoneg = AUTONEG_DISABLE;
7644
7645         cmd->maxtxpkt = 0;
7646         cmd->maxrxpkt = 0;
7647
7648         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7649            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7650            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7651            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7652            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7653            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7654            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7655
7656         return 0;
7657 }
7658
7659 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7660 {
7661         struct bnx2x *bp = netdev_priv(dev);
7662         u32 advertising;
7663
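        /* in E1H multi-function mode the link is a shared resource
         * managed by the PMF, so per-function set requests are
         * quietly ignored */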
7664         if (IS_E1HMF(bp))
7665                 return 0;
7666
7667         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7668            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7669            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7670            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7671            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7672            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7673            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7674
7675         if (cmd->autoneg == AUTONEG_ENABLE) {
7676                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7677                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7678                         return -EINVAL;
7679                 }
7680
7681                 /* advertise the requested speed and duplex if supported */
7682                 cmd->advertising &= bp->port.supported;
7683
7684                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7685                 bp->link_params.req_duplex = DUPLEX_FULL;
7686                 bp->port.advertising |= (ADVERTISED_Autoneg |
7687                                          cmd->advertising);
7688
7689         } else { /* forced speed */
7690                 /* advertise the requested speed and duplex if supported */
7691                 switch (cmd->speed) {
7692                 case SPEED_10:
7693                         if (cmd->duplex == DUPLEX_FULL) {
7694                                 if (!(bp->port.supported &
7695                                       SUPPORTED_10baseT_Full)) {
7696                                         DP(NETIF_MSG_LINK,
7697                                            "10M full not supported\n");
7698                                         return -EINVAL;
7699                                 }
7700
7701                                 advertising = (ADVERTISED_10baseT_Full |
7702                                                ADVERTISED_TP);
7703                         } else {
7704                                 if (!(bp->port.supported &
7705                                       SUPPORTED_10baseT_Half)) {
7706                                         DP(NETIF_MSG_LINK,
7707                                            "10M half not supported\n");
7708                                         return -EINVAL;
7709                                 }
7710
7711                                 advertising = (ADVERTISED_10baseT_Half |
7712                                                ADVERTISED_TP);
7713                         }
7714                         break;
7715
7716                 case SPEED_100:
7717                         if (cmd->duplex == DUPLEX_FULL) {
7718                                 if (!(bp->port.supported &
7719                                                 SUPPORTED_100baseT_Full)) {
7720                                         DP(NETIF_MSG_LINK,
7721                                            "100M full not supported\n");
7722                                         return -EINVAL;
7723                                 }
7724
7725                                 advertising = (ADVERTISED_100baseT_Full |
7726                                                ADVERTISED_TP);
7727                         } else {
7728                                 if (!(bp->port.supported &
7729                                                 SUPPORTED_100baseT_Half)) {
7730                                         DP(NETIF_MSG_LINK,
7731                                            "100M half not supported\n");
7732                                         return -EINVAL;
7733                                 }
7734
7735                                 advertising = (ADVERTISED_100baseT_Half |
7736                                                ADVERTISED_TP);
7737                         }
7738                         break;
7739
7740                 case SPEED_1000:
7741                         if (cmd->duplex != DUPLEX_FULL) {
7742                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7743                                 return -EINVAL;
7744                         }
7745
7746                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7747                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7748                                 return -EINVAL;
7749                         }
7750
7751                         advertising = (ADVERTISED_1000baseT_Full |
7752                                        ADVERTISED_TP);
7753                         break;
7754
7755                 case SPEED_2500:
7756                         if (cmd->duplex != DUPLEX_FULL) {
7757                                 DP(NETIF_MSG_LINK,
7758                                    "2.5G half not supported\n");
7759                                 return -EINVAL;
7760                         }
7761
7762                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7763                                 DP(NETIF_MSG_LINK,
7764                                    "2.5G full not supported\n");
7765                                 return -EINVAL;
7766                         }
7767
7768                         advertising = (ADVERTISED_2500baseX_Full |
7769                                        ADVERTISED_TP);
7770                         break;
7771
7772                 case SPEED_10000:
7773                         if (cmd->duplex != DUPLEX_FULL) {
7774                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7775                                 return -EINVAL;
7776                         }
7777
7778                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7779                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7780                                 return -EINVAL;
7781                         }
7782
7783                         advertising = (ADVERTISED_10000baseT_Full |
7784                                        ADVERTISED_FIBRE);
7785                         break;
7786
7787                 default:
7788                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7789                         return -EINVAL;
7790                 }
7791
7792                 bp->link_params.req_line_speed = cmd->speed;
7793                 bp->link_params.req_duplex = cmd->duplex;
7794                 bp->port.advertising = advertising;
7795         }
7796
7797         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7798            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7799            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7800            bp->port.advertising);
7801
7802         if (netif_running(dev)) {
7803                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7804                 bnx2x_link_set(bp);
7805         }
7806
7807         return 0;
7808 }
7809
7810 #define PHY_FW_VER_LEN                  10
7811
7812 static void bnx2x_get_drvinfo(struct net_device *dev,
7813                               struct ethtool_drvinfo *info)
7814 {
7815         struct bnx2x *bp = netdev_priv(dev);
7816         u8 phy_fw_ver[PHY_FW_VER_LEN];
7817
7818         strcpy(info->driver, DRV_MODULE_NAME);
7819         strcpy(info->version, DRV_MODULE_VERSION);
7820
7821         phy_fw_ver[0] = '\0';
7822         if (bp->port.pmf) {
7823                 bnx2x_acquire_phy_lock(bp);
7824                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7825                                              (bp->state != BNX2X_STATE_CLOSED),
7826                                              phy_fw_ver, PHY_FW_VER_LEN);
7827                 bnx2x_release_phy_lock(bp);
7828         }
7829
7830         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7831                  (bp->common.bc_ver & 0xff0000) >> 16,
7832                  (bp->common.bc_ver & 0xff00) >> 8,
7833                  (bp->common.bc_ver & 0xff),
7834                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7835         strcpy(info->bus_info, pci_name(bp->pdev));
7836         info->n_stats = BNX2X_NUM_STATS;
7837         info->testinfo_len = BNX2X_NUM_TESTS;
7838         info->eedump_len = bp->common.flash_size;
7839         info->regdump_len = 0;
7840 }
7841
7842 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7843 {
7844         struct bnx2x *bp = netdev_priv(dev);
7845
7846         if (bp->flags & NO_WOL_FLAG) {
7847                 wol->supported = 0;
7848                 wol->wolopts = 0;
7849         } else {
7850                 wol->supported = WAKE_MAGIC;
7851                 if (bp->wol)
7852                         wol->wolopts = WAKE_MAGIC;
7853                 else
7854                         wol->wolopts = 0;
7855         }
7856         memset(&wol->sopass, 0, sizeof(wol->sopass));
7857 }
7858
7859 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7860 {
7861         struct bnx2x *bp = netdev_priv(dev);
7862
7863         if (wol->wolopts & ~WAKE_MAGIC)
7864                 return -EINVAL;
7865
7866         if (wol->wolopts & WAKE_MAGIC) {
7867                 if (bp->flags & NO_WOL_FLAG)
7868                         return -EINVAL;
7869
7870                 bp->wol = 1;
7871         } else
7872                 bp->wol = 0;
7873
7874         return 0;
7875 }
7876
7877 static u32 bnx2x_get_msglevel(struct net_device *dev)
7878 {
7879         struct bnx2x *bp = netdev_priv(dev);
7880
7881         return bp->msglevel;
7882 }
7883
7884 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7885 {
7886         struct bnx2x *bp = netdev_priv(dev);
7887
7888         if (capable(CAP_NET_ADMIN))
7889                 bp->msglevel = level;
7890 }
7891
7892 static int bnx2x_nway_reset(struct net_device *dev)
7893 {
7894         struct bnx2x *bp = netdev_priv(dev);
7895
7896         if (!bp->port.pmf)
7897                 return 0;
7898
7899         if (netif_running(dev)) {
7900                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7901                 bnx2x_link_set(bp);
7902         }
7903
7904         return 0;
7905 }
7906
7907 static int bnx2x_get_eeprom_len(struct net_device *dev)
7908 {
7909         struct bnx2x *bp = netdev_priv(dev);
7910
7911         return bp->common.flash_size;
7912 }
7913
7914 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7915 {
7916         int port = BP_PORT(bp);
7917         int count, i;
7918         u32 val = 0;
7919
7920         /* adjust timeout for emulation/FPGA */
7921         count = NVRAM_TIMEOUT_COUNT;
7922         if (CHIP_REV_IS_SLOW(bp))
7923                 count *= 100;
7924
7925         /* request access to nvram interface */
7926         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7927                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7928
7929         for (i = 0; i < count*10; i++) {
7930                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7931                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7932                         break;
7933
7934                 udelay(5);
7935         }
7936
7937         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7938                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7939                 return -EBUSY;
7940         }
7941
7942         return 0;
7943 }
7944
7945 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7946 {
7947         int port = BP_PORT(bp);
7948         int count, i;
7949         u32 val = 0;
7950
7951         /* adjust timeout for emulation/FPGA */
7952         count = NVRAM_TIMEOUT_COUNT;
7953         if (CHIP_REV_IS_SLOW(bp))
7954                 count *= 100;
7955
7956         /* relinquish nvram interface */
7957         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7958                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7959
7960         for (i = 0; i < count*10; i++) {
7961                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7962                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7963                         break;
7964
7965                 udelay(5);
7966         }
7967
7968         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7969                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7970                 return -EBUSY;
7971         }
7972
7973         return 0;
7974 }
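
/* Editor's note: the acquire/release helpers above share one polling
 * idiom; a hedged generic form (function name hypothetical): spin on a
 * register until the masked value matches, bounded by count*10 samples
 * spaced 5us apart, with count scaled 100x on emulation/FPGA.
 */
#if 0
static int bnx2x_poll_reg(struct bnx2x *bp, u32 reg, u32 mask, u32 want,
                          int count)
{
        int i;

        for (i = 0; i < count*10; i++) {
                if ((REG_RD(bp, reg) & mask) == want)
                        return 0;
                udelay(5);
        }
        return -EBUSY;
}
#endif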
7975
7976 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7977 {
7978         u32 val;
7979
7980         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7981
7982         /* enable both bits, even on read */
7983         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7984                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7985                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7986 }
7987
7988 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7989 {
7990         u32 val;
7991
7992         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7993
7994         /* disable both bits, even after read */
7995         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7996                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7997                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7998 }
7999
8000 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8001                                   u32 cmd_flags)
8002 {
8003         int count, i, rc;
8004         u32 val;
8005
8006         /* build the command word */
8007         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8008
8009         /* need to clear DONE bit separately */
8010         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8011
8012         /* address of the NVRAM to read from */
8013         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8014                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8015
8016         /* issue a read command */
8017         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8018
8019         /* adjust timeout for emulation/FPGA */
8020         count = NVRAM_TIMEOUT_COUNT;
8021         if (CHIP_REV_IS_SLOW(bp))
8022                 count *= 100;
8023
8024         /* wait for completion */
8025         *ret_val = 0;
8026         rc = -EBUSY;
8027         for (i = 0; i < count; i++) {
8028                 udelay(5);
8029                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8030
8031                 if (val & MCPR_NVM_COMMAND_DONE) {
8032                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8033                         /* we read nvram data in cpu order,
8034                          * but ethtool sees it as an array of bytes;
8035                          * converting to big-endian does the work */
8036                         val = cpu_to_be32(val);
8037                         *ret_val = val;
8038                         rc = 0;
8039                         break;
8040                 }
8041         }
8042
8043         return rc;
8044 }
8045
8046 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8047                             int buf_size)
8048 {
8049         int rc;
8050         u32 cmd_flags;
8051         u32 val;
8052
8053         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8054                 DP(BNX2X_MSG_NVM,
8055                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8056                    offset, buf_size);
8057                 return -EINVAL;
8058         }
8059
8060         if (offset + buf_size > bp->common.flash_size) {
8061                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8062                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8063                    offset, buf_size, bp->common.flash_size);
8064                 return -EINVAL;
8065         }
8066
8067         /* request access to nvram interface */
8068         rc = bnx2x_acquire_nvram_lock(bp);
8069         if (rc)
8070                 return rc;
8071
8072         /* enable access to nvram interface */
8073         bnx2x_enable_nvram_access(bp);
8074
8075         /* read the first word(s) */
8076         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8077         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8078                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8079                 memcpy(ret_buf, &val, 4);
8080
8081                 /* advance to the next dword */
8082                 offset += sizeof(u32);
8083                 ret_buf += sizeof(u32);
8084                 buf_size -= sizeof(u32);
8085                 cmd_flags = 0;
8086         }
8087
8088         if (rc == 0) {
8089                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8090                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8091                 memcpy(ret_buf, &val, 4);
8092         }
8093
8094         /* disable access to nvram interface */
8095         bnx2x_disable_nvram_access(bp);
8096         bnx2x_release_nvram_lock(bp);
8097
8098         return rc;
8099 }
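
/* Usage sketch (illustrative, not in the driver): reading and checking the
 * 4-byte bootstrap magic with the helper above.  The data comes back as a
 * big-endian byte stream, byte-for-byte as stored in flash, so swap before
 * comparing (bnx2x_test_nvram() below does the same).
 */
static int __maybe_unused bnx2x_nvram_magic_example(struct bnx2x *bp)
{
        __be32 magic;
        int rc;

        rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
        if (rc)
                return rc;

        /* a valid flash image starts with 0x669955aa */
        return (be32_to_cpu(magic) == 0x669955aa) ? 0 : -ENODEV;
}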
8100
8101 static int bnx2x_get_eeprom(struct net_device *dev,
8102                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8103 {
8104         struct bnx2x *bp = netdev_priv(dev);
8105         int rc;
8106
8107         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8108            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8109            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8110            eeprom->len, eeprom->len);
8111
8112         /* parameters already validated in ethtool_get_eeprom */
8113
8114         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8115
8116         return rc;
8117 }
8118
8119 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8120                                    u32 cmd_flags)
8121 {
8122         int count, i, rc;
8123
8124         /* build the command word */
8125         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8126
8127         /* need to clear DONE bit separately */
8128         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8129
8130         /* write the data */
8131         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8132
8133         /* address of the NVRAM to write to */
8134         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8135                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8136
8137         /* issue the write command */
8138         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8139
8140         /* adjust timeout for emulation/FPGA */
8141         count = NVRAM_TIMEOUT_COUNT;
8142         if (CHIP_REV_IS_SLOW(bp))
8143                 count *= 100;
8144
8145         /* wait for completion */
8146         rc = -EBUSY;
8147         for (i = 0; i < count; i++) {
8148                 udelay(5);
8149                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8150                 if (val & MCPR_NVM_COMMAND_DONE) {
8151                         rc = 0;
8152                         break;
8153                 }
8154         }
8155
8156         return rc;
8157 }
8158
8159 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
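/* e.g. for offset 0x1001 the aligned dword is at 0x1000 and BYTE_OFFSET()
 * is 8, so bnx2x_nvram_write1() below patches bits 15:8 of that dword and
 * leaves the other three bytes untouched
 */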
8160
8161 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8162                               int buf_size)
8163 {
8164         int rc;
8165         u32 cmd_flags;
8166         u32 align_offset;
8167         u32 val;
8168
8169         if (offset + buf_size > bp->common.flash_size) {
8170                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8171                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8172                    offset, buf_size, bp->common.flash_size);
8173                 return -EINVAL;
8174         }
8175
8176         /* request access to nvram interface */
8177         rc = bnx2x_acquire_nvram_lock(bp);
8178         if (rc)
8179                 return rc;
8180
8181         /* enable access to nvram interface */
8182         bnx2x_enable_nvram_access(bp);
8183
8184         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8185         align_offset = (offset & ~0x03);
8186         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8187
8188         if (rc == 0) {
8189                 val &= ~(0xff << BYTE_OFFSET(offset));
8190                 val |= (*data_buf << BYTE_OFFSET(offset));
8191
8192                 /* nvram data is returned as an array of bytes;
8193                  * convert it back to cpu order */
8194                 val = be32_to_cpu(val);
8195
8196                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8197                                              cmd_flags);
8198         }
8199
8200         /* disable access to nvram interface */
8201         bnx2x_disable_nvram_access(bp);
8202         bnx2x_release_nvram_lock(bp);
8203
8204         return rc;
8205 }
8206
8207 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8208                              int buf_size)
8209 {
8210         int rc;
8211         u32 cmd_flags;
8212         u32 val;
8213         u32 written_so_far;
8214
8215         if (buf_size == 1)      /* ethtool */
8216                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8217
8218         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8219                 DP(BNX2X_MSG_NVM,
8220                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8221                    offset, buf_size);
8222                 return -EINVAL;
8223         }
8224
8225         if (offset + buf_size > bp->common.flash_size) {
8226                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8227                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8228                    offset, buf_size, bp->common.flash_size);
8229                 return -EINVAL;
8230         }
8231
8232         /* request access to nvram interface */
8233         rc = bnx2x_acquire_nvram_lock(bp);
8234         if (rc)
8235                 return rc;
8236
8237         /* enable access to nvram interface */
8238         bnx2x_enable_nvram_access(bp);
8239
8240         written_so_far = 0;
8241         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8242         while ((written_so_far < buf_size) && (rc == 0)) {
8243                 if (written_so_far == (buf_size - sizeof(u32)))
8244                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8245                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8246                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8247                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8248                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8249
8250                 memcpy(&val, data_buf, 4);
8251
8252                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8253
8254                 /* advance to the next dword */
8255                 offset += sizeof(u32);
8256                 data_buf += sizeof(u32);
8257                 written_so_far += sizeof(u32);
8258                 cmd_flags = 0;
8259         }
8260
8261         /* disable access to nvram interface */
8262         bnx2x_disable_nvram_access(bp);
8263         bnx2x_release_nvram_lock(bp);
8264
8265         return rc;
8266 }
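
/* Worked example (illustrative, assuming NVRAM_PAGE_SIZE is 256): writing
 * 12 bytes at offset 0xf8 issues three dword commands - 0xf8 gets FIRST,
 * 0xfc gets LAST (it closes the 256-byte page) and 0x100 gets LAST again
 * as the final dword.  Note the else-if chain above: a dword that both
 * opens a page and ends the buffer is flagged LAST only.
 */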
8267
8268 static int bnx2x_set_eeprom(struct net_device *dev,
8269                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8270 {
8271         struct bnx2x *bp = netdev_priv(dev);
8272         int rc;
8273
8274         if (!netif_running(dev))
8275                 return -EAGAIN;
8276
8277         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8278            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8279            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8280            eeprom->len, eeprom->len);
8281
8282         /* parameters already validated in ethtool_set_eeprom */
8283
8284         /* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
8285         if (eeprom->magic == 0x00504859)
8286                 if (bp->port.pmf) {
8287
8288                         bnx2x_acquire_phy_lock(bp);
8289                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8290                                              bp->link_params.ext_phy_config,
8291                                              (bp->state != BNX2X_STATE_CLOSED),
8292                                              eebuf, eeprom->len);
8293                         if ((bp->state == BNX2X_STATE_OPEN) ||
8294                             (bp->state == BNX2X_STATE_DISABLED)) {
8295                                 rc |= bnx2x_link_reset(&bp->link_params,
8296                                                        &bp->link_vars);
8297                                 rc |= bnx2x_phy_init(&bp->link_params,
8298                                                      &bp->link_vars);
8299                         }
8300                         bnx2x_release_phy_lock(bp);
8301
8302                 } else /* Only the PMF can access the PHY */
8303                         return -EINVAL;
8304         else
8305                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8306
8307         return rc;
8308 }
8309
8310 static int bnx2x_get_coalesce(struct net_device *dev,
8311                               struct ethtool_coalesce *coal)
8312 {
8313         struct bnx2x *bp = netdev_priv(dev);
8314
8315         memset(coal, 0, sizeof(struct ethtool_coalesce));
8316
8317         coal->rx_coalesce_usecs = bp->rx_ticks;
8318         coal->tx_coalesce_usecs = bp->tx_ticks;
8319
8320         return 0;
8321 }
8322
8323 static int bnx2x_set_coalesce(struct net_device *dev,
8324                               struct ethtool_coalesce *coal)
8325 {
8326         struct bnx2x *bp = netdev_priv(dev);
8327
8328         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8329         if (bp->rx_ticks > 3000)
8330                 bp->rx_ticks = 3000;
8331
8332         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8333         if (bp->tx_ticks > 0x3000)
8334                 bp->tx_ticks = 0x3000;
8335
8336         if (netif_running(dev))
8337                 bnx2x_update_coalesce(bp);
8338
8339         return 0;
8340 }
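
/* From userspace this is driven by e.g. "ethtool -C ethX rx-usecs 100
 * tx-usecs 100"; values above the caps are silently clamped rather than
 * rejected, and take effect immediately if the interface is up.
 */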
8341
8342 static void bnx2x_get_ringparam(struct net_device *dev,
8343                                 struct ethtool_ringparam *ering)
8344 {
8345         struct bnx2x *bp = netdev_priv(dev);
8346
8347         ering->rx_max_pending = MAX_RX_AVAIL;
8348         ering->rx_mini_max_pending = 0;
8349         ering->rx_jumbo_max_pending = 0;
8350
8351         ering->rx_pending = bp->rx_ring_size;
8352         ering->rx_mini_pending = 0;
8353         ering->rx_jumbo_pending = 0;
8354
8355         ering->tx_max_pending = MAX_TX_AVAIL;
8356         ering->tx_pending = bp->tx_ring_size;
8357 }
8358
8359 static int bnx2x_set_ringparam(struct net_device *dev,
8360                                struct ethtool_ringparam *ering)
8361 {
8362         struct bnx2x *bp = netdev_priv(dev);
8363         int rc = 0;
8364
8365         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8366             (ering->tx_pending > MAX_TX_AVAIL) ||
8367             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8368                 return -EINVAL;
8369
8370         bp->rx_ring_size = ering->rx_pending;
8371         bp->tx_ring_size = ering->tx_pending;
8372
8373         if (netif_running(dev)) {
8374                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8375                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8376         }
8377
8378         return rc;
8379 }
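
/* Note: resizing the rings ("ethtool -G ethX rx N tx M") unloads and
 * reloads the whole NIC, so the link will bounce while the new rings
 * are allocated.
 */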
8380
8381 static void bnx2x_get_pauseparam(struct net_device *dev,
8382                                  struct ethtool_pauseparam *epause)
8383 {
8384         struct bnx2x *bp = netdev_priv(dev);
8385
8386         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8387                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8388
8389         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8390                             BNX2X_FLOW_CTRL_RX);
8391         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8392                             BNX2X_FLOW_CTRL_TX);
8393
8394         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8395            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8396            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8397 }
8398
8399 static int bnx2x_set_pauseparam(struct net_device *dev,
8400                                 struct ethtool_pauseparam *epause)
8401 {
8402         struct bnx2x *bp = netdev_priv(dev);
8403
8404         if (IS_E1HMF(bp))
8405                 return 0;
8406
8407         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8408            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8409            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8410
8411         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8412
8413         if (epause->rx_pause)
8414                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8415
8416         if (epause->tx_pause)
8417                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8418
8419         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8420                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8421
8422         if (epause->autoneg) {
8423                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8424                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8425                         return -EINVAL;
8426                 }
8427
8428                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8429                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8430         }
8431
8432         DP(NETIF_MSG_LINK,
8433            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8434
8435         if (netif_running(dev)) {
8436                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8437                 bnx2x_link_set(bp);
8438         }
8439
8440         return 0;
8441 }
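
/* The request thus resolves as follows: no rx/tx pause gives NONE;
 * explicit rx and/or tx pause sets the corresponding RX/TX bits; and with
 * autoneg enabled at an autoneg line speed the request collapses back to
 * pure AUTO, letting link negotiation decide.
 */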
8442
8443 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8444 {
8445         struct bnx2x *bp = netdev_priv(dev);
8446         int changed = 0;
8447         int rc = 0;
8448
8449         /* TPA requires Rx CSUM offloading */
8450         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8451                 if (!(dev->features & NETIF_F_LRO)) {
8452                         dev->features |= NETIF_F_LRO;
8453                         bp->flags |= TPA_ENABLE_FLAG;
8454                         changed = 1;
8455                 }
8456
8457         } else if (dev->features & NETIF_F_LRO) {
8458                 dev->features &= ~NETIF_F_LRO;
8459                 bp->flags &= ~TPA_ENABLE_FLAG;
8460                 changed = 1;
8461         }
8462
8463         if (changed && netif_running(dev)) {
8464                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8465                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8466         }
8467
8468         return rc;
8469 }
8470
8471 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8472 {
8473         struct bnx2x *bp = netdev_priv(dev);
8474
8475         return bp->rx_csum;
8476 }
8477
8478 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8479 {
8480         struct bnx2x *bp = netdev_priv(dev);
8481         int rc = 0;
8482
8483         bp->rx_csum = data;
8484
8485         /* Disable TPA when Rx CSUM is disabled; otherwise all
8486            TPA'ed packets would be discarded due to a wrong TCP CSUM */
8487         if (!data) {
8488                 u32 flags = ethtool_op_get_flags(dev);
8489
8490                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8491         }
8492
8493         return rc;
8494 }
8495
8496 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8497 {
8498         if (data) {
8499                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8500                 dev->features |= NETIF_F_TSO6;
8501         } else {
8502                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8503                 dev->features &= ~NETIF_F_TSO6;
8504         }
8505
8506         return 0;
8507 }
8508
8509 static const struct {
8510         char string[ETH_GSTRING_LEN];
8511 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8512         { "register_test (offline)" },
8513         { "memory_test (offline)" },
8514         { "loopback_test (offline)" },
8515         { "nvram_test (online)" },
8516         { "interrupt_test (online)" },
8517         { "link_test (online)" },
8518         { "idle check (online)" },
8519         { "MC errors (online)" }
8520 };
8521
8522 static int bnx2x_self_test_count(struct net_device *dev)
8523 {
8524         return BNX2X_NUM_TESTS;
8525 }
8526
8527 static int bnx2x_test_registers(struct bnx2x *bp)
8528 {
8529         int idx, i, rc = -ENODEV;
8530         u32 wr_val = 0;
8531         int port = BP_PORT(bp);
8532         static const struct {
8533                 u32  offset0;
8534                 u32  offset1;
8535                 u32  mask;
8536         } reg_tbl[] = {
8537 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8538                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8539                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8540                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8541                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8542                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8543                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8544                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8545                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8546                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8547 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8548                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8549                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8550                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8551                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8552                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8553                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8554                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8555                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8556                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8557 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8558                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8559                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8560                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8561                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8562                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8563                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8564                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8565                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8566                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8567 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8568                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8569                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8570                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8571                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8572                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8573                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8574                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8575
8576                 { 0xffffffff, 0, 0x00000000 }
8577         };
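        /* e.g. for { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 } on port 1 the
         * loop below exercises PBF_REG_MAC_IF0_ENABLE + 1*4, and only bit 0
         * has to read back as written; offset1 is the per-port stride
         */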
8578
8579         if (!netif_running(bp->dev))
8580                 return rc;
8581
8582         /* Run the test twice:
8583            first writing 0x00000000, then writing 0xffffffff */
8584         for (idx = 0; idx < 2; idx++) {
8585
8586                 switch (idx) {
8587                 case 0:
8588                         wr_val = 0;
8589                         break;
8590                 case 1:
8591                         wr_val = 0xffffffff;
8592                         break;
8593                 }
8594
8595                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8596                         u32 offset, mask, save_val, val;
8597
8598                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8599                         mask = reg_tbl[i].mask;
8600
8601                         save_val = REG_RD(bp, offset);
8602
8603                         REG_WR(bp, offset, wr_val);
8604                         val = REG_RD(bp, offset);
8605
8606                         /* Restore the original register's value */
8607                         REG_WR(bp, offset, save_val);
8608
8609                         /* verify that the value reads back as expected */
8610                         if ((val & mask) != (wr_val & mask))
8611                                 goto test_reg_exit;
8612                 }
8613         }
8614
8615         rc = 0;
8616
8617 test_reg_exit:
8618         return rc;
8619 }
8620
8621 static int bnx2x_test_memory(struct bnx2x *bp)
8622 {
8623         int i, j, rc = -ENODEV;
8624         u32 val;
8625         static const struct {
8626                 u32 offset;
8627                 int size;
8628         } mem_tbl[] = {
8629                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8630                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8631                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8632                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8633                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8634                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8635                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8636
8637                 { 0xffffffff, 0 }
8638         };
8639         static const struct {
8640                 char *name;
8641                 u32 offset;
8642                 u32 e1_mask;
8643                 u32 e1h_mask;
8644         } prty_tbl[] = {
8645                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8646                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8647                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8648                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8649                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8650                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8651
8652                 { NULL, 0xffffffff, 0, 0 }
8653         };
8654
8655         if (!netif_running(bp->dev))
8656                 return rc;
8657
8658         /* Go through all the memories */
8659         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8660                 for (j = 0; j < mem_tbl[i].size; j++)
8661                         REG_RD(bp, mem_tbl[i].offset + j*4);
8662
8663         /* Check the parity status */
8664         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8665                 val = REG_RD(bp, prty_tbl[i].offset);
8666                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8667                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8668                         DP(NETIF_MSG_HW,
8669                            "%s is 0x%x\n", prty_tbl[i].name, val);
8670                         goto test_mem_exit;
8671                 }
8672         }
8673
8674         rc = 0;
8675
8676 test_mem_exit:
8677         return rc;
8678 }
8679
8680 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8681 {
8682         int cnt = 1000;
8683
8684         if (link_up)
8685                 while (bnx2x_link_test(bp) && cnt--)
8686                         msleep(10);
8687 }
8688
8689 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8690 {
8691         unsigned int pkt_size, num_pkts, i;
8692         struct sk_buff *skb;
8693         unsigned char *packet;
8694         struct bnx2x_fastpath *fp = &bp->fp[0];
8695         u16 tx_start_idx, tx_idx;
8696         u16 rx_start_idx, rx_idx;
8697         u16 pkt_prod;
8698         struct sw_tx_bd *tx_buf;
8699         struct eth_tx_bd *tx_bd;
8700         dma_addr_t mapping;
8701         union eth_rx_cqe *cqe;
8702         u8 cqe_fp_flags;
8703         struct sw_rx_bd *rx_buf;
8704         u16 len;
8705         int rc = -ENODEV;
8706
8707         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8708                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8709                 bnx2x_acquire_phy_lock(bp);
8710                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8711                 bnx2x_release_phy_lock(bp);
8712
8713         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8714                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8715                 bnx2x_acquire_phy_lock(bp);
8716                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8717                 bnx2x_release_phy_lock(bp);
8718                 /* wait until link state is restored */
8719                 bnx2x_wait_for_link(bp, link_up);
8720
8721         } else
8722                 return -EINVAL;
8723
8724         pkt_size = 1514;
8725         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8726         if (!skb) {
8727                 rc = -ENOMEM;
8728                 goto test_loopback_exit;
8729         }
8730         packet = skb_put(skb, pkt_size);
8731         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8732         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8733         for (i = ETH_HLEN; i < pkt_size; i++)
8734                 packet[i] = (unsigned char) (i & 0xff);
8735
8736         num_pkts = 0;
8737         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8738         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8739
8740         pkt_prod = fp->tx_pkt_prod++;
8741         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8742         tx_buf->first_bd = fp->tx_bd_prod;
8743         tx_buf->skb = skb;
8744
8745         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8746         mapping = pci_map_single(bp->pdev, skb->data,
8747                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8748         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8749         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8750         tx_bd->nbd = cpu_to_le16(1);
8751         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8752         tx_bd->vlan = cpu_to_le16(pkt_prod);
8753         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8754                                        ETH_TX_BD_FLAGS_END_BD);
8755         tx_bd->general_data = ((UNICAST_ADDRESS <<
8756                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8757
8758         wmb();
8759
8760         fp->hw_tx_prods->bds_prod =
8761                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8762         mb(); /* FW restriction: must not reorder writing nbd and packets */
8763         fp->hw_tx_prods->packets_prod =
8764                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8765         DOORBELL(bp, FP_IDX(fp), 0);
8766
8767         mmiowb();
8768
8769         num_pkts++;
8770         fp->tx_bd_prod++;
8771         bp->dev->trans_start = jiffies;
8772
8773         udelay(100);
8774
8775         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8776         if (tx_idx != tx_start_idx + num_pkts)
8777                 goto test_loopback_exit;
8778
8779         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8780         if (rx_idx != rx_start_idx + num_pkts)
8781                 goto test_loopback_exit;
8782
8783         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8784         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8785         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8786                 goto test_loopback_rx_exit;
8787
8788         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8789         if (len != pkt_size)
8790                 goto test_loopback_rx_exit;
8791
8792         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8793         skb = rx_buf->skb;
8794         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8795         for (i = ETH_HLEN; i < pkt_size; i++)
8796                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8797                         goto test_loopback_rx_exit;
8798
8799         rc = 0;
8800
8801 test_loopback_rx_exit:
8802
8803         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8804         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8805         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8806         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8807
8808         /* Update producers */
8809         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8810                              fp->rx_sge_prod);
8811
8812 test_loopback_exit:
8813         bp->link_params.loopback_mode = LOOPBACK_NONE;
8814
8815         return rc;
8816 }
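
/* In short: the loopback test builds one self-addressed 1514-byte frame
 * with a known byte pattern, posts it as a single-BD TX, waits briefly,
 * then checks that the status-block TX/RX consumer indices advanced and
 * verifies the pattern in the received buffer before recycling the ring.
 */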
8817
8818 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8819 {
8820         int rc = 0;
8821
8822         if (!netif_running(bp->dev))
8823                 return BNX2X_LOOPBACK_FAILED;
8824
8825         bnx2x_netif_stop(bp, 1);
8826
8827         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8828                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8829                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8830         }
8831
8832         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8833                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8834                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8835         }
8836
8837         bnx2x_netif_start(bp);
8838
8839         return rc;
8840 }
8841
8842 #define CRC32_RESIDUAL                  0xdebb20e3
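
/* Why a fixed constant works (illustrative sketch, not driver code): for
 * the reflected CRC-32 that ether_crc_le() computes (all-ones preset, no
 * final inversion), a block that ends with its own inverted CRC stored
 * little-endian always checksums to the residual 0xdebb20e3, so a region
 * can be validated without knowing where its CRC field sits:
 */
static void __maybe_unused bnx2x_crc_residual_example(u8 *buf, int len)
{
        /* the stored form is the one's complement, appended little-endian */
        u32 crc = ~ether_crc_le(len, buf);

        buf[len + 0] = crc & 0xff;
        buf[len + 1] = (crc >> 8) & 0xff;
        buf[len + 2] = (crc >> 16) & 0xff;
        buf[len + 3] = (crc >> 24) & 0xff;

        /* now ether_crc_le(len + 4, buf) == CRC32_RESIDUAL */
}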
8843
8844 static int bnx2x_test_nvram(struct bnx2x *bp)
8845 {
8846         static const struct {
8847                 int offset;
8848                 int size;
8849         } nvram_tbl[] = {
8850                 {     0,  0x14 }, /* bootstrap */
8851                 {  0x14,  0xec }, /* dir */
8852                 { 0x100, 0x350 }, /* manuf_info */
8853                 { 0x450,  0xf0 }, /* feature_info */
8854                 { 0x640,  0x64 }, /* upgrade_key_info */
8855                 { 0x6a4,  0x64 },
8856                 { 0x708,  0x70 }, /* manuf_key_info */
8857                 { 0x778,  0x70 },
8858                 {     0,     0 }
8859         };
8860         u32 buf[0x350 / 4];
8861         u8 *data = (u8 *)buf;
8862         int i, rc;
8863         u32 magic, csum;
8864
8865         rc = bnx2x_nvram_read(bp, 0, data, 4);
8866         if (rc) {
8867                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8868                 goto test_nvram_exit;
8869         }
8870
8871         magic = be32_to_cpu(buf[0]);
8872         if (magic != 0x669955aa) {
8873                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8874                 rc = -ENODEV;
8875                 goto test_nvram_exit;
8876         }
8877
8878         for (i = 0; nvram_tbl[i].size; i++) {
8879
8880                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8881                                       nvram_tbl[i].size);
8882                 if (rc) {
8883                         DP(NETIF_MSG_PROBE,
8884                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8885                         goto test_nvram_exit;
8886                 }
8887
8888                 csum = ether_crc_le(nvram_tbl[i].size, data);
8889                 if (csum != CRC32_RESIDUAL) {
8890                         DP(NETIF_MSG_PROBE,
8891                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8892                         rc = -ENODEV;
8893                         goto test_nvram_exit;
8894                 }
8895         }
8896
8897 test_nvram_exit:
8898         return rc;
8899 }
8900
8901 static int bnx2x_test_intr(struct bnx2x *bp)
8902 {
8903         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8904         int i, rc;
8905
8906         if (!netif_running(bp->dev))
8907                 return -ENODEV;
8908
8909         config->hdr.length_6b = 0;
8910         config->hdr.offset = 0;
8911         config->hdr.client_id = BP_CL_ID(bp);
8912         config->hdr.reserved1 = 0;
8913
8914         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8915                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8916                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8917         if (rc == 0) {
8918                 bp->set_mac_pending++;
8919                 for (i = 0; i < 10; i++) {
8920                         if (!bp->set_mac_pending)
8921                                 break;
8922                         msleep_interruptible(10);
8923                 }
8924                 if (i == 10)
8925                         rc = -ENODEV;
8926         }
8927
8928         return rc;
8929 }
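
/* The interrupt test above fires a benign SET_MAC ramrod and waits up to
 * roughly 100ms (10 x 10ms) for its completion event; if set_mac_pending
 * never clears, the slowpath interrupt path is reported broken.
 */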
8930
8931 static void bnx2x_self_test(struct net_device *dev,
8932                             struct ethtool_test *etest, u64 *buf)
8933 {
8934         struct bnx2x *bp = netdev_priv(dev);
8935
8936         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8937
8938         if (!netif_running(dev))
8939                 return;
8940
8941         /* offline tests are not supported in MF mode */
8942         if (IS_E1HMF(bp))
8943                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8944
8945         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8946                 u8 link_up;
8947
8948                 link_up = bp->link_vars.link_up;
8949                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8950                 bnx2x_nic_load(bp, LOAD_DIAG);
8951                 /* wait until link state is restored */
8952                 bnx2x_wait_for_link(bp, link_up);
8953
8954                 if (bnx2x_test_registers(bp) != 0) {
8955                         buf[0] = 1;
8956                         etest->flags |= ETH_TEST_FL_FAILED;
8957                 }
8958                 if (bnx2x_test_memory(bp) != 0) {
8959                         buf[1] = 1;
8960                         etest->flags |= ETH_TEST_FL_FAILED;
8961                 }
8962                 buf[2] = bnx2x_test_loopback(bp, link_up);
8963                 if (buf[2] != 0)
8964                         etest->flags |= ETH_TEST_FL_FAILED;
8965
8966                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8967                 bnx2x_nic_load(bp, LOAD_NORMAL);
8968                 /* wait until link state is restored */
8969                 bnx2x_wait_for_link(bp, link_up);
8970         }
8971         if (bnx2x_test_nvram(bp) != 0) {
8972                 buf[3] = 1;
8973                 etest->flags |= ETH_TEST_FL_FAILED;
8974         }
8975         if (bnx2x_test_intr(bp) != 0) {
8976                 buf[4] = 1;
8977                 etest->flags |= ETH_TEST_FL_FAILED;
8978         }
8979         if (bp->port.pmf)
8980                 if (bnx2x_link_test(bp) != 0) {
8981                         buf[5] = 1;
8982                         etest->flags |= ETH_TEST_FL_FAILED;
8983                 }
8984         buf[7] = bnx2x_mc_assert(bp);
8985         if (buf[7] != 0)
8986                 etest->flags |= ETH_TEST_FL_FAILED;
8987
8988 #ifdef BNX2X_EXTRA_DEBUG
8989         bnx2x_panic_dump(bp);
8990 #endif
8991 }
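
/* The buf[] slots above map 1:1 to bnx2x_tests_str_arr: 0 registers,
 * 1 memory, 2 loopback, 3 nvram, 4 interrupt, 5 link and 7 MC assert;
 * slot 6 ("idle check") is not populated by this self-test.
 */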
8992
8993 static const struct {
8994         long offset;
8995         int size;
8996         u32 flags;
8997 #define STATS_FLAGS_PORT                1
8998 #define STATS_FLAGS_FUNC                2
8999         u8 string[ETH_GSTRING_LEN];
9000 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9001 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9002                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
9003         { STATS_OFFSET32(error_bytes_received_hi),
9004                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9005         { STATS_OFFSET32(total_bytes_transmitted_hi),
9006                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
9007         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9008                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9009         { STATS_OFFSET32(total_unicast_packets_received_hi),
9010                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9011         { STATS_OFFSET32(total_multicast_packets_received_hi),
9012                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9013         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9014                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9015         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9016                                 8, STATS_FLAGS_FUNC, "tx_packets" },
9017         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9018                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9019 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9020                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9021         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9022                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9023         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9024                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9025         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9026                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9027         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9028                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9029         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9030                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9031         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9032                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9033         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9034                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9035         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9036                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9037         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9038                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9039 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9040                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9041         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9042                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9043         { STATS_OFFSET32(jabber_packets_received),
9044                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9045         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9046                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9047         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9048                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9049         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9050                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9051         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9052                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9053         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9054                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9055         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9056                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9057         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9058                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9059 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9060                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9061         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9062                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9063         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9064                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9065         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9066                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9067         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9068                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9069         { STATS_OFFSET32(mac_filter_discard),
9070                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9071         { STATS_OFFSET32(no_buff_discard),
9072                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9073         { STATS_OFFSET32(xxoverflow_discard),
9074                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9075         { STATS_OFFSET32(brb_drop_hi),
9076                                 8, STATS_FLAGS_PORT, "brb_discard" },
9077         { STATS_OFFSET32(brb_truncate_hi),
9078                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9079 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9080                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9081         { STATS_OFFSET32(rx_skb_alloc_failed),
9082                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9083 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9084                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9085 };
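
/* The _hi offsets above point at the high dword of 64-bit MAC counters
 * stored as two consecutive u32s (hi first); size-8 entries are
 * reassembled below with HILO_U64(hi, lo), while size-4 entries are
 * plain 32-bit counters read directly.
 */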
9086
9087 #define IS_NOT_E1HMF_STAT(bp, i) \
9088                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9089
9090 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9091 {
9092         struct bnx2x *bp = netdev_priv(dev);
9093         int i, j;
9094
9095         switch (stringset) {
9096         case ETH_SS_STATS:
9097                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9098                         if (IS_NOT_E1HMF_STAT(bp, i))
9099                                 continue;
9100                         strcpy(buf + j*ETH_GSTRING_LEN,
9101                                bnx2x_stats_arr[i].string);
9102                         j++;
9103                 }
9104                 break;
9105
9106         case ETH_SS_TEST:
9107                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9108                 break;
9109         }
9110 }
9111
9112 static int bnx2x_get_stats_count(struct net_device *dev)
9113 {
9114         struct bnx2x *bp = netdev_priv(dev);
9115         int i, num_stats = 0;
9116
9117         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9118                 if (IS_NOT_E1HMF_STAT(bp, i))
9119                         continue;
9120                 num_stats++;
9121         }
9122         return num_stats;
9123 }
9124
9125 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9126                                     struct ethtool_stats *stats, u64 *buf)
9127 {
9128         struct bnx2x *bp = netdev_priv(dev);
9129         u32 *hw_stats = (u32 *)&bp->eth_stats;
9130         int i, j;
9131
9132         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9133                 if (IS_NOT_E1HMF_STAT(bp, i))
9134                         continue;
9135
9136                 if (bnx2x_stats_arr[i].size == 0) {
9137                         /* skip this counter */
9138                         buf[j] = 0;
9139                         j++;
9140                         continue;
9141                 }
9142                 if (bnx2x_stats_arr[i].size == 4) {
9143                         /* 4-byte counter */
9144                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9145                         j++;
9146                         continue;
9147                 }
9148                 /* 8-byte counter */
9149                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9150                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9151                 j++;
9152         }
9153 }
9154
9155 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9156 {
9157         struct bnx2x *bp = netdev_priv(dev);
9158         int port = BP_PORT(bp);
9159         int i;
9160
9161         if (!netif_running(dev))
9162                 return 0;
9163
9164         if (!bp->port.pmf)
9165                 return 0;
9166
9167         if (data == 0)
9168                 data = 2;
9169
9170         for (i = 0; i < (data * 2); i++) {
9171                 if ((i % 2) == 0)
9172                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9173                                       bp->link_params.hw_led_mode,
9174                                       bp->link_params.chip_id);
9175                 else
9176                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9177                                       bp->link_params.hw_led_mode,
9178                                       bp->link_params.chip_id);
9179
9180                 msleep_interruptible(500);
9181                 if (signal_pending(current))
9182                         break;
9183         }
9184
9185         if (bp->link_vars.link_up)
9186                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9187                               bp->link_vars.line_speed,
9188                               bp->link_params.hw_led_mode,
9189                               bp->link_params.chip_id);
9190
9191         return 0;
9192 }
9193
9194 static struct ethtool_ops bnx2x_ethtool_ops = {
9195         .get_settings           = bnx2x_get_settings,
9196         .set_settings           = bnx2x_set_settings,
9197         .get_drvinfo            = bnx2x_get_drvinfo,
9198         .get_wol                = bnx2x_get_wol,
9199         .set_wol                = bnx2x_set_wol,
9200         .get_msglevel           = bnx2x_get_msglevel,
9201         .set_msglevel           = bnx2x_set_msglevel,
9202         .nway_reset             = bnx2x_nway_reset,
9203         .get_link               = ethtool_op_get_link,
9204         .get_eeprom_len         = bnx2x_get_eeprom_len,
9205         .get_eeprom             = bnx2x_get_eeprom,
9206         .set_eeprom             = bnx2x_set_eeprom,
9207         .get_coalesce           = bnx2x_get_coalesce,
9208         .set_coalesce           = bnx2x_set_coalesce,
9209         .get_ringparam          = bnx2x_get_ringparam,
9210         .set_ringparam          = bnx2x_set_ringparam,
9211         .get_pauseparam         = bnx2x_get_pauseparam,
9212         .set_pauseparam         = bnx2x_set_pauseparam,
9213         .get_rx_csum            = bnx2x_get_rx_csum,
9214         .set_rx_csum            = bnx2x_set_rx_csum,
9215         .get_tx_csum            = ethtool_op_get_tx_csum,
9216         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9217         .set_flags              = bnx2x_set_flags,
9218         .get_flags              = ethtool_op_get_flags,
9219         .get_sg                 = ethtool_op_get_sg,
9220         .set_sg                 = ethtool_op_set_sg,
9221         .get_tso                = ethtool_op_get_tso,
9222         .set_tso                = bnx2x_set_tso,
9223         .self_test_count        = bnx2x_self_test_count,
9224         .self_test              = bnx2x_self_test,
9225         .get_strings            = bnx2x_get_strings,
9226         .phys_id                = bnx2x_phys_id,
9227         .get_stats_count        = bnx2x_get_stats_count,
9228         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9229 };
9230
9231 /* end of ethtool_ops */
9232
9233 /****************************************************************************
9234 * General service functions
9235 ****************************************************************************/
9236
9237 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9238 {
9239         u16 pmcsr;
9240
9241         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9242
9243         switch (state) {
9244         case PCI_D0:
9245                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9246                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9247                                        PCI_PM_CTRL_PME_STATUS));
9248
9249                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9250                         /* delay required during transition out of D3hot */
9251                         msleep(20);
9252                 break;
9253
9254         case PCI_D3hot:
9255                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9256                 pmcsr |= 3;     /* 3 == D3hot in the PM state field */
9257
9258                 if (bp->wol)
9259                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9260
9261                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9262                                       pmcsr);
9263
9264                 /* No more memory access after this point until
9265                  * the device is brought back to D0.
9266                  */
9267                 break;
9268
9269         default:
9270                 return -EINVAL;
9271         }
9272         return 0;
9273 }
9274
9275 /*
9276  * net_device service functions
9277  */
9278
9279 static int bnx2x_poll(struct napi_struct *napi, int budget)
9280 {
9281         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9282                                                  napi);
9283         struct bnx2x *bp = fp->bp;
9284         int work_done = 0;
9285         u16 rx_cons_sb;
9286
9287 #ifdef BNX2X_STOP_ON_ERROR
9288         if (unlikely(bp->panic))
9289                 goto poll_panic;
9290 #endif
9291
9292         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9293         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9294         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9295
9296         bnx2x_update_fpsb_idx(fp);
9297
9298         if (BNX2X_HAS_TX_WORK(fp))
9299                 bnx2x_tx_int(fp, budget);
9300
9301         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
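        /* the last RCQ entry on each page is a "next page" pointer - skip it */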
9302         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9303                 rx_cons_sb++;
9304         if (BNX2X_HAS_RX_WORK(fp))
9305                 work_done = bnx2x_rx_int(fp, budget);
9306
9307         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9308         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9309         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9310                 rx_cons_sb++;
9311
9312         /* must not complete if we consumed full budget */
9313         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9314
9315 #ifdef BNX2X_STOP_ON_ERROR
9316 poll_panic:
9317 #endif
9318                 netif_rx_complete(napi);
9319
9320                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9321                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9322                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9323                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9324         }
9325         return work_done;
9326 }
9327
9328
9329 /* We split the first BD into header and data BDs
9330  * to ease the pain of our fellow microcode engineers;
9331  * we use one mapping for both BDs.
9332  * So far this has only been observed to happen
9333  * in Other Operating Systems(TM).
9334  */
9335 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9336                                    struct bnx2x_fastpath *fp,
9337                                    struct eth_tx_bd **tx_bd, u16 hlen,
9338                                    u16 bd_prod, int nbd)
9339 {
9340         struct eth_tx_bd *h_tx_bd = *tx_bd;
9341         struct eth_tx_bd *d_tx_bd;
9342         dma_addr_t mapping;
9343         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9344
9345         /* first fix first BD */
9346         h_tx_bd->nbd = cpu_to_le16(nbd);
9347         h_tx_bd->nbytes = cpu_to_le16(hlen);
9348
9349         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9350            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9351            h_tx_bd->addr_lo, h_tx_bd->nbd);
9352
9353         /* now get a new data BD
9354          * (after the pbd) and fill it */
9355         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9356         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9357
9358         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9359                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9360
9361         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9362         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9363         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9364         d_tx_bd->vlan = 0;
9365         /* this marks the BD as one that has no individual mapping;
9366          * the FW ignores this flag in a BD not marked as start
9367          */
9368         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9369         DP(NETIF_MSG_TX_QUEUED,
9370            "TSO split data size is %d (%x:%x)\n",
9371            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9372
9373         /* update tx_bd for marking the last BD flag */
9374         *tx_bd = d_tx_bd;
9375
9376         return bd_prod;
9377 }
9378
9379 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9380 {
9381         if (fix > 0)
9382                 csum = (u16) ~csum_fold(csum_sub(csum,
9383                                 csum_partial(t_header - fix, fix, 0)));
9384
9385         else if (fix < 0)
9386                 csum = (u16) ~csum_fold(csum_add(csum,
9387                                 csum_partial(t_header, -fix, 0)));
9388
9389         return swab16(csum);
9390 }
9391
9392 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9393 {
9394         u32 rc;
9395
9396         if (skb->ip_summed != CHECKSUM_PARTIAL)
9397                 rc = XMIT_PLAIN;
9398
9399         else {
9400                 if (skb->protocol == htons(ETH_P_IPV6)) {
9401                         rc = XMIT_CSUM_V6;
9402                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9403                                 rc |= XMIT_CSUM_TCP;
9404
9405                 } else {
9406                         rc = XMIT_CSUM_V4;
9407                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9408                                 rc |= XMIT_CSUM_TCP;
9409                 }
9410         }
9411
9412         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9413                 rc |= XMIT_GSO_V4;
9414
9415         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9416                 rc |= XMIT_GSO_V6;
9417
9418         return rc;
9419 }
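
/* e.g. a TSO IPv4/TCP skb with CHECKSUM_PARTIAL yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a frame with no
 * checksum offload requested is simply XMIT_PLAIN
 */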
9420
9421 /* check if packet requires linearization (packet is too fragmented) */
9422 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9423                              u32 xmit_type)
9424 {
9425         int to_copy = 0;
9426         int hlen = 0;
9427         int first_bd_sz = 0;
9428
9429         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9430         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9431
9432                 if (xmit_type & XMIT_GSO) {
9433                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9434                         /* Check if LSO packet needs to be copied:
9435                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9436                         int wnd_size = MAX_FETCH_BD - 3;
9437                         /* Number of windows to check */
9438                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9439                         int wnd_idx = 0;
9440                         int frag_idx = 0;
9441                         u32 wnd_sum = 0;
9442
9443                         /* Headers length */
9444                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9445                                 tcp_hdrlen(skb);
9446
9447                         /* Amount of data (w/o headers) in the linear part of the SKB */
9448                         first_bd_sz = skb_headlen(skb) - hlen;
9449
9450                         wnd_sum  = first_bd_sz;
9451
9452                         /* Calculate the first sum - it's special */
9453                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9454                                 wnd_sum +=
9455                                         skb_shinfo(skb)->frags[frag_idx].size;
9456
9457                         /* If there was payload in the linear data, check the first window */
9458                         if (first_bd_sz > 0) {
9459                                 if (unlikely(wnd_sum < lso_mss)) {
9460                                         to_copy = 1;
9461                                         goto exit_lbl;
9462                                 }
9463
9464                                 wnd_sum -= first_bd_sz;
9465                         }
9466
9467                         /* Others are easier: run through the frag list and
9468                            check all windows */
9469                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9470                                 wnd_sum +=
9471                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9472
9473                                 if (unlikely(wnd_sum < lso_mss)) {
9474                                         to_copy = 1;
9475                                         break;
9476                                 }
9477                                 wnd_sum -=
9478                                         skb_shinfo(skb)->frags[wnd_idx].size;
9479                         }
9480
9481                 } else {
9482                         /* a non-LSO packet that is too fragmented
9483                            must always be linearized */
9484                         to_copy = 1;
9485                 }
9486         }
9487
9488 exit_lbl:
9489         if (unlikely(to_copy))
9490                 DP(NETIF_MSG_TX_QUEUED,
9491                    "Linearization IS REQUIRED for %s packet. "
9492                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9493                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9494                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9495
9496         return to_copy;
9497 }
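
/* A minimal standalone sketch of the same window check (hypothetical
 * helper, not driver code): the FW can fetch at most wnd_size BDs per
 * LSO segment, so every run of wnd_size consecutive fragments must
 * carry at least one MSS of payload, or the skb has to be linearized.
 */
static int bnx2x_frags_need_linearize(const unsigned int *frag_sz, int nfrags,
                                      int wnd_size, unsigned int lso_mss)
{
        unsigned int wnd_sum = 0;
        int i;

        for (i = 0; i < nfrags; i++) {
                wnd_sum += frag_sz[i];
                if (i >= wnd_size)
                        wnd_sum -= frag_sz[i - wnd_size];
                if ((i >= wnd_size - 1) && (wnd_sum < lso_mss))
                        return 1;       /* some window is short of one MSS */
        }
        return 0;
}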
9498
9499 /* called with netif_tx_lock
9500  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9501  * netif_wake_queue()
9502  */
9503 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9504 {
9505         struct bnx2x *bp = netdev_priv(dev);
9506         struct bnx2x_fastpath *fp;
9507         struct sw_tx_bd *tx_buf;
9508         struct eth_tx_bd *tx_bd;
9509         struct eth_tx_parse_bd *pbd = NULL;
9510         u16 pkt_prod, bd_prod;
9511         int nbd, fp_index;
9512         dma_addr_t mapping;
9513         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9514         int vlan_off = (bp->e1hov ? 4 : 0);
9515         int i;
9516         u8 hlen = 0;
9517
9518 #ifdef BNX2X_STOP_ON_ERROR
9519         if (unlikely(bp->panic))
9520                 return NETDEV_TX_BUSY;
9521 #endif
9522
9523         fp_index = (smp_processor_id() % bp->num_queues);
9524         fp = &bp->fp[fp_index];
9525
9526         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9527                 bp->eth_stats.driver_xoff++;
9528                 netif_stop_queue(dev);
9529                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9530                 return NETDEV_TX_BUSY;
9531         }
9532
9533         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9534            "  gso type %x  xmit_type %x\n",
9535            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9536            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9537
9538         /* First, check if we need to linearize the skb
9539            (due to FW restrictions) */
9540         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9541                 /* Statistics of linearization */
9542                 bp->lin_cnt++;
9543                 if (skb_linearize(skb) != 0) {
9544                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9545                            "silently dropping this SKB\n");
9546                         dev_kfree_skb_any(skb);
9547                         return NETDEV_TX_OK;
9548                 }
9549         }
9550
9551         /*
9552          * Please read carefully.  First we use one BD which we mark as
9553          * the start BD, then for TSO or checksum offload we add a parsing
9554          * info BD, and only then the rest of the TSO BDs.
9555          * (Don't forget to mark the last one as last, and to unmap only
9556          * AFTER you write to the BD ...)
9557          * And above all, all pbd sizes are in 16-bit words - NOT DWORDS!
9558          */
9559
9560         pkt_prod = fp->tx_pkt_prod++;
9561         bd_prod = TX_BD(fp->tx_bd_prod);
9562
9563         /* get a tx_buf and first BD */
9564         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9565         tx_bd = &fp->tx_desc_ring[bd_prod];
9566
9567         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9568         tx_bd->general_data = (UNICAST_ADDRESS <<
9569                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9570         /* header nbd */
9571         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9572
9573         /* remember the first BD of the packet */
9574         tx_buf->first_bd = fp->tx_bd_prod;
9575         tx_buf->skb = skb;
9576
9577         DP(NETIF_MSG_TX_QUEUED,
9578            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9579            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9580
9581 #ifdef BCM_VLAN
9582         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9583             (bp->flags & HW_VLAN_TX_FLAG)) {
9584                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9585                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9586                 vlan_off += 4;
9587         } else
9588 #endif
9589                 tx_bd->vlan = cpu_to_le16(pkt_prod);
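                /* With no real tag, the vlan field apparently doubles as a
                 * per-packet cookie carrying the packet producer index; this
                 * is an observation from the code, not a documented FW
                 * contract.
                 */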
9590
9591         if (xmit_type) {
9592                 /* turn on parsing and get a BD */
9593                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9594                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9595
9596                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9597         }
9598
9599         if (xmit_type & XMIT_CSUM) {
9600                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9601
9602                 /* for now NS flag is not used in Linux */
9603                 pbd->global_data = (hlen |
9604                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9605                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9606
9607                 pbd->ip_hlen = (skb_transport_header(skb) -
9608                                 skb_network_header(skb)) / 2;
9609
9610                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9611
9612                 pbd->total_hlen = cpu_to_le16(hlen);
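                /* total_hlen above is in 16-bit words, matching the "sizes
                 * are in words" rule noted earlier; convert hlen back to a
                 * byte count (dropping the VLAN adjustment) for the TSO
                 * header-split check further down.
                 */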
9613                 hlen = hlen*2 - vlan_off;
9614
9615                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9616
9617                 if (xmit_type & XMIT_CSUM_V4)
9618                         tx_bd->bd_flags.as_bitfield |=
9619                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9620                 else
9621                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9622
9623                 if (xmit_type & XMIT_CSUM_TCP) {
9624                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9625
9626                 } else {
9627                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9628
9629                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9630                         pbd->cs_offset = fix / 2;
9631
9632                         DP(NETIF_MSG_TX_QUEUED,
9633                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9634                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9635                            SKB_CS(skb));
9636
9637                         /* HW bug: fixup the CSUM */
9638                         pbd->tcp_pseudo_csum =
9639                                 bnx2x_csum_fix(skb_transport_header(skb),
9640                                                SKB_CS(skb), fix);
9641
9642                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9643                            pbd->tcp_pseudo_csum);
9644                 }
9645         }
9646
9647         mapping = pci_map_single(bp->pdev, skb->data,
9648                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9649
9650         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9651         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9652         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9653         tx_bd->nbd = cpu_to_le16(nbd);
9654         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9655
9656         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9657            "  nbytes %d  flags %x  vlan %x\n",
9658            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9659            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9660            le16_to_cpu(tx_bd->vlan));
9661
9662         if (xmit_type & XMIT_GSO) {
9663
9664                 DP(NETIF_MSG_TX_QUEUED,
9665                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9666                    skb->len, hlen, skb_headlen(skb),
9667                    skb_shinfo(skb)->gso_size);
9668
9669                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9670
9671                 if (unlikely(skb_headlen(skb) > hlen))
9672                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9673                                                  bd_prod, ++nbd);
9674
9675                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9676                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9677                 pbd->tcp_flags = pbd_tcp_flags(skb);
9678
9679                 if (xmit_type & XMIT_GSO_V4) {
9680                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9681                         pbd->tcp_pseudo_csum =
9682                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9683                                                           ip_hdr(skb)->daddr,
9684                                                           0, IPPROTO_TCP, 0));
9685
9686                 } else
9687                         pbd->tcp_pseudo_csum =
9688                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9689                                                         &ipv6_hdr(skb)->daddr,
9690                                                         0, IPPROTO_TCP, 0));
9691
9692                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9693         }
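        /* For TSO, the pseudo-header checksum above is seeded with a zero
         * length; the ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag presumably
         * tells the FW to factor in each segment's own length when it
         * recomputes the TCP checksum per segment.
         */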
9694
9695         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9696                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9697
9698                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9699                 tx_bd = &fp->tx_desc_ring[bd_prod];
9700
9701                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9702                                        frag->size, PCI_DMA_TODEVICE);
9703
9704                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9705                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9706                 tx_bd->nbytes = cpu_to_le16(frag->size);
9707                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9708                 tx_bd->bd_flags.as_bitfield = 0;
9709
9710                 DP(NETIF_MSG_TX_QUEUED,
9711                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9712                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9713                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9714         }
9715
9716         /* now at last mark the BD as the last BD */
9717         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9718
9719         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9720            tx_bd, tx_bd->bd_flags.as_bitfield);
9721
9722         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9723
9724         /* now send a tx doorbell, counting the ring's next-page pointer
9725          * BD if the packet's chain crosses it or ends right before it
9726          */
9727         if (TX_BD_POFF(bd_prod) < nbd)
9728                 nbd++;
9729
9730         if (pbd)
9731                 DP(NETIF_MSG_TX_QUEUED,
9732                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9733                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9734                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9735                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9736                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9737
9738         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9739
9740         /*
9741          * Make sure that the BD data is updated before updating the producer
9742          * since FW might read the BD right after the producer is updated.
9743          * This is only applicable for weak-ordered memory model archs such
9744          * This is only applicable for weak-ordered memory model archs such
9745          * as IA-64.  The following barrier is also mandatory since the FW
9746          * assumes packets must have BDs.
9747         wmb();
9748
9749         fp->hw_tx_prods->bds_prod =
9750                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9751         mb(); /* FW restriction: must not reorder writing nbd and packets */
9752         fp->hw_tx_prods->packets_prod =
9753                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9754         DOORBELL(bp, FP_IDX(fp), 0);
9755
9756         mmiowb();
9757
9758         fp->tx_bd_prod += nbd;
9759         dev->trans_start = jiffies;
9760
9761         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9762                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9763                    if we put Tx into XOFF state. */
9764                 smp_mb();
9765                 netif_stop_queue(dev);
9766                 bp->eth_stats.driver_xoff++;
9767                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9768                         netif_wake_queue(dev);
9769         }
9770         fp->tx_pkt++;
9771
9772         return NETDEV_TX_OK;
9773 }
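
/* Shape of the BD chain bnx2x_start_xmit() builds for a TSO frame
 * (illustrative summary, not part of the driver):
 *
 *   start BD   - headers (+ VLAN tag), ETH_TX_BD_FLAGS_START_BD
 *   parsing BD - header offsets, MSS, TCP flags, pseudo checksum
 *   data BD    - linear payload past the headers (see bnx2x_tx_split())
 *   frag BDs   - one per page fragment; the last gets END_BD
 *
 * nbd counts all of these, plus one more when the chain wraps past the
 * ring's next-page pointer BD.
 */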
9774
9775 /* called with rtnl_lock */
9776 static int bnx2x_open(struct net_device *dev)
9777 {
9778         struct bnx2x *bp = netdev_priv(dev);
9779
9780         bnx2x_set_power_state(bp, PCI_D0);
9781
9782         return bnx2x_nic_load(bp, LOAD_OPEN);
9783 }
9784
9785 /* called with rtnl_lock */
9786 static int bnx2x_close(struct net_device *dev)
9787 {
9788         struct bnx2x *bp = netdev_priv(dev);
9789
9790         /* Unload the driver, release IRQs */
9791         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9792         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9793                 if (!CHIP_REV_IS_SLOW(bp))
9794                         bnx2x_set_power_state(bp, PCI_D3hot);
9795
9796         return 0;
9797 }
9798
9799 /* called with netif_tx_lock from set_multicast */
9800 static void bnx2x_set_rx_mode(struct net_device *dev)
9801 {
9802         struct bnx2x *bp = netdev_priv(dev);
9803         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9804         int port = BP_PORT(bp);
9805
9806         if (bp->state != BNX2X_STATE_OPEN) {
9807                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9808                 return;
9809         }
9810
9811         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9812
9813         if (dev->flags & IFF_PROMISC)
9814                 rx_mode = BNX2X_RX_MODE_PROMISC;
9815
9816         else if ((dev->flags & IFF_ALLMULTI) ||
9817                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9818                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9819
9820         else { /* some multicasts */
9821                 if (CHIP_IS_E1(bp)) {
9822                         int i, old, offset;
9823                         struct dev_mc_list *mclist;
9824                         struct mac_configuration_cmd *config =
9825                                                 bnx2x_sp(bp, mcast_config);
9826
9827                         for (i = 0, mclist = dev->mc_list;
9828                              mclist && (i < dev->mc_count);
9829                              i++, mclist = mclist->next) {
9830
9831                                 config->config_table[i].
9832                                         cam_entry.msb_mac_addr =
9833                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9834                                 config->config_table[i].
9835                                         cam_entry.middle_mac_addr =
9836                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9837                                 config->config_table[i].
9838                                         cam_entry.lsb_mac_addr =
9839                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9840                                 config->config_table[i].cam_entry.flags =
9841                                                         cpu_to_le16(port);
9842                                 config->config_table[i].
9843                                         target_table_entry.flags = 0;
9844                                 config->config_table[i].
9845                                         target_table_entry.client_id = 0;
9846                                 config->config_table[i].
9847                                         target_table_entry.vlan_id = 0;
9848
9849                                 DP(NETIF_MSG_IFUP,
9850                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9851                                    config->config_table[i].
9852                                                 cam_entry.msb_mac_addr,
9853                                    config->config_table[i].
9854                                                 cam_entry.middle_mac_addr,
9855                                    config->config_table[i].
9856                                                 cam_entry.lsb_mac_addr);
9857                         }
9858                         old = config->hdr.length_6b;
9859                         if (old > i) {
9860                                 for (; i < old; i++) {
9861                                         if (CAM_IS_INVALID(config->
9862                                                            config_table[i])) {
9863                                                 i--; /* already invalidated */
9864                                                 break;
9865                                         }
9866                                         /* invalidate */
9867                                         CAM_INVALIDATE(config->
9868                                                        config_table[i]);
9869                                 }
9870                         }
9871
9872                         if (CHIP_REV_IS_SLOW(bp))
9873                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9874                         else
9875                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9876
9877                         config->hdr.length_6b = i;
9878                         config->hdr.offset = offset;
9879                         config->hdr.client_id = BP_CL_ID(bp);
9880                         config->hdr.reserved1 = 0;
9881
9882                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9883                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9884                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9885                                       0);
9886                 } else { /* E1H */
9887                         /* Accept one or more multicasts */
9888                         struct dev_mc_list *mclist;
9889                         u32 mc_filter[MC_HASH_SIZE];
9890                         u32 crc, bit, regidx;
9891                         int i;
9892
9893                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9894
9895                         for (i = 0, mclist = dev->mc_list;
9896                              mclist && (i < dev->mc_count);
9897                              i++, mclist = mclist->next) {
9898
9899                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9900                                    mclist->dmi_addr);
9901
9902                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9903                                 bit = (crc >> 24) & 0xff;
9904                                 regidx = bit >> 5;
9905                                 bit &= 0x1f;
9906                                 mc_filter[regidx] |= (1 << bit);
9907                         }
9908
9909                         for (i = 0; i < MC_HASH_SIZE; i++)
9910                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9911                                        mc_filter[i]);
9912                 }
9913         }
9914
9915         bp->rx_mode = rx_mode;
9916         bnx2x_set_storm_rx_mode(bp);
9917 }
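
/* Standalone sketch of the E1H multicast hash used above (hypothetical
 * helper, not driver code): each MAC address is reduced to one bit of a
 * 256-bit table spread across the MC_HASH_SIZE 32-bit hash registers.
 */
static inline void bnx2x_mc_hash_bit(const u8 *mac, u32 *regidx, u32 *bitpos)
{
        u32 crc = crc32c_le(0, mac, ETH_ALEN);
        u32 bit = (crc >> 24) & 0xff;   /* top CRC byte picks 1 of 256 bits */

        *regidx = bit >> 5;             /* which 32-bit hash register */
        *bitpos = bit & 0x1f;           /* bit position within it */
}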
9918
9919 /* called with rtnl_lock */
9920 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9921 {
9922         struct sockaddr *addr = p;
9923         struct bnx2x *bp = netdev_priv(dev);
9924
9925         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9926                 return -EINVAL;
9927
9928         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9929         if (netif_running(dev)) {
9930                 if (CHIP_IS_E1(bp))
9931                         bnx2x_set_mac_addr_e1(bp, 1);
9932                 else
9933                         bnx2x_set_mac_addr_e1h(bp, 1);
9934         }
9935
9936         return 0;
9937 }
9938
9939 /* called with rtnl_lock */
9940 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9941 {
9942         struct mii_ioctl_data *data = if_mii(ifr);
9943         struct bnx2x *bp = netdev_priv(dev);
9944         int port = BP_PORT(bp);
9945         int err;
9946
9947         switch (cmd) {
9948         case SIOCGMIIPHY:
9949                 data->phy_id = bp->port.phy_addr;
9950
9951                 /* fallthrough */
9952
9953         case SIOCGMIIREG: {
9954                 u16 mii_regval;
9955
9956                 if (!netif_running(dev))
9957                         return -EAGAIN;
9958
9959                 mutex_lock(&bp->port.phy_mutex);
9960                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9961                                       DEFAULT_PHY_DEV_ADDR,
9962                                       (data->reg_num & 0x1f), &mii_regval);
9963                 data->val_out = mii_regval;
9964                 mutex_unlock(&bp->port.phy_mutex);
9965                 return err;
9966         }
9967
9968         case SIOCSMIIREG:
9969                 if (!capable(CAP_NET_ADMIN))
9970                         return -EPERM;
9971
9972                 if (!netif_running(dev))
9973                         return -EAGAIN;
9974
9975                 mutex_lock(&bp->port.phy_mutex);
9976                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9977                                        DEFAULT_PHY_DEV_ADDR,
9978                                        (data->reg_num & 0x1f), data->val_in);
9979                 mutex_unlock(&bp->port.phy_mutex);
9980                 return err;
9981
9982         default:
9983                 /* do nothing */
9984                 break;
9985         }
9986
9987         return -EOPNOTSUPP;
9988 }
9989
9990 /* called with rtnl_lock */
9991 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9992 {
9993         struct bnx2x *bp = netdev_priv(dev);
9994         int rc = 0;
9995
9996         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9997             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9998                 return -EINVAL;
9999
10000         /* This does not race with packet allocation
10001          * because the actual alloc size is
10002          * only updated as part of load
10003          */
10004         dev->mtu = new_mtu;
10005
10006         if (netif_running(dev)) {
10007                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10008                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10009         }
10010
10011         return rc;
10012 }
10013
10014 static void bnx2x_tx_timeout(struct net_device *dev)
10015 {
10016         struct bnx2x *bp = netdev_priv(dev);
10017
10018 #ifdef BNX2X_STOP_ON_ERROR
10019         if (!bp->panic)
10020                 bnx2x_panic();
10021 #endif
10022         /* This allows the netif to be shut down gracefully before resetting */
10023         schedule_work(&bp->reset_task);
10024 }
10025
10026 #ifdef BCM_VLAN
10027 /* called with rtnl_lock */
10028 static void bnx2x_vlan_rx_register(struct net_device *dev,
10029                                    struct vlan_group *vlgrp)
10030 {
10031         struct bnx2x *bp = netdev_priv(dev);
10032
10033         bp->vlgrp = vlgrp;
10034
10035         /* Set flags according to the required capabilities */
10036         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10037
10038         if (dev->features & NETIF_F_HW_VLAN_TX)
10039                 bp->flags |= HW_VLAN_TX_FLAG;
10040
10041         if (dev->features & NETIF_F_HW_VLAN_RX)
10042                 bp->flags |= HW_VLAN_RX_FLAG;
10043
10044         if (netif_running(dev))
10045                 bnx2x_set_client_config(bp);
10046 }
10047
10048 #endif
10049
10050 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10051 static void poll_bnx2x(struct net_device *dev)
10052 {
10053         struct bnx2x *bp = netdev_priv(dev);
10054
10055         disable_irq(bp->pdev->irq);
10056         bnx2x_interrupt(bp->pdev->irq, dev);
10057         enable_irq(bp->pdev->irq);
10058 }
10059 #endif
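
/* poll_bnx2x() backs .ndo_poll_controller, which netpoll clients such
 * as netconsole use to run the interrupt handler when regular interrupt
 * delivery cannot be relied upon.
 */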
10060
10061 static const struct net_device_ops bnx2x_netdev_ops = {
10062         .ndo_open               = bnx2x_open,
10063         .ndo_stop               = bnx2x_close,
10064         .ndo_start_xmit         = bnx2x_start_xmit,
10065         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10066         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10067         .ndo_validate_addr      = eth_validate_addr,
10068         .ndo_do_ioctl           = bnx2x_ioctl,
10069         .ndo_change_mtu         = bnx2x_change_mtu,
10070         .ndo_tx_timeout         = bnx2x_tx_timeout,
10071 #ifdef BCM_VLAN
10072         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10073 #endif
10074 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10075         .ndo_poll_controller    = poll_bnx2x,
10076 #endif
10077 };
10078
10079
10080 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10081                                     struct net_device *dev)
10082 {
10083         struct bnx2x *bp;
10084         int rc;
10085
10086         SET_NETDEV_DEV(dev, &pdev->dev);
10087         bp = netdev_priv(dev);
10088
10089         bp->dev = dev;
10090         bp->pdev = pdev;
10091         bp->flags = 0;
10092         bp->func = PCI_FUNC(pdev->devfn);
10093
10094         rc = pci_enable_device(pdev);
10095         if (rc) {
10096                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10097                 goto err_out;
10098         }
10099
10100         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10101                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10102                        " aborting\n");
10103                 rc = -ENODEV;
10104                 goto err_out_disable;
10105         }
10106
10107         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10108                 printk(KERN_ERR PFX "Cannot find second PCI device"
10109                        " base address, aborting\n");
10110                 rc = -ENODEV;
10111                 goto err_out_disable;
10112         }
10113
10114         if (atomic_read(&pdev->enable_cnt) == 1) {
10115                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10116                 if (rc) {
10117                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10118                                " aborting\n");
10119                         goto err_out_disable;
10120                 }
10121
10122                 pci_set_master(pdev);
10123                 pci_save_state(pdev);
10124         }
10125
10126         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10127         if (bp->pm_cap == 0) {
10128                 printk(KERN_ERR PFX "Cannot find power management"
10129                        " capability, aborting\n");
10130                 rc = -EIO;
10131                 goto err_out_release;
10132         }
10133
10134         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10135         if (bp->pcie_cap == 0) {
10136                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10137                        " aborting\n");
10138                 rc = -EIO;
10139                 goto err_out_release;
10140         }
10141
10142         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10143                 bp->flags |= USING_DAC_FLAG;
10144                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10145                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10146                                " failed, aborting\n");
10147                         rc = -EIO;
10148                         goto err_out_release;
10149                 }
10150
10151         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10152                 printk(KERN_ERR PFX "System does not support DMA,"
10153                        " aborting\n");
10154                 rc = -EIO;
10155                 goto err_out_release;
10156         }
10157
10158         dev->mem_start = pci_resource_start(pdev, 0);
10159         dev->base_addr = dev->mem_start;
10160         dev->mem_end = pci_resource_end(pdev, 0);
10161
10162         dev->irq = pdev->irq;
10163
10164         bp->regview = pci_ioremap_bar(pdev, 0);
10165         if (!bp->regview) {
10166                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10167                 rc = -ENOMEM;
10168                 goto err_out_release;
10169         }
10170
10171         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10172                                         min_t(u64, BNX2X_DB_SIZE,
10173                                               pci_resource_len(pdev, 2)));
10174         if (!bp->doorbells) {
10175                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10176                 rc = -ENOMEM;
10177                 goto err_out_unmap;
10178         }
10179
10180         bnx2x_set_power_state(bp, PCI_D0);
10181
10182         /* clean indirect addresses */
10183         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10184                                PCICFG_VENDOR_ID_OFFSET);
10185         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10186         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10187         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10188         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10189
10190         dev->watchdog_timeo = TX_TIMEOUT;
10191
10192         dev->netdev_ops = &bnx2x_netdev_ops;
10193         dev->ethtool_ops = &bnx2x_ethtool_ops;
10194         dev->features |= NETIF_F_SG;
10195         dev->features |= NETIF_F_HW_CSUM;
10196         if (bp->flags & USING_DAC_FLAG)
10197                 dev->features |= NETIF_F_HIGHDMA;
10198 #ifdef BCM_VLAN
10199         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10200         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10201 #endif
10202         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10203         dev->features |= NETIF_F_TSO6;
10204
10205         return 0;
10206
10207 err_out_unmap:
10208         if (bp->regview) {
10209                 iounmap(bp->regview);
10210                 bp->regview = NULL;
10211         }
10212         if (bp->doorbells) {
10213                 iounmap(bp->doorbells);
10214                 bp->doorbells = NULL;
10215         }
10216
10217 err_out_release:
10218         if (atomic_read(&pdev->enable_cnt) == 1)
10219                 pci_release_regions(pdev);
10220
10221 err_out_disable:
10222         pci_disable_device(pdev);
10223         pci_set_drvdata(pdev, NULL);
10224
10225 err_out:
10226         return rc;
10227 }
10228
10229 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10230 {
10231         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10232
10233         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10234         return val;
10235 }
10236
10237 /* return value of 1=2.5GHz 2=5GHz */
10238 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10239 {
10240         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10241
10242         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10243         return val;
10244 }
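
/* Example: an x8 link trained at 2.5GT/s reads back width 8 and speed 1,
 * which bnx2x_init_one() below reports as "PCI-E x8 2.5GHz".
 */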
10245
10246 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10247                                     const struct pci_device_id *ent)
10248 {
10249         static int version_printed;
10250         struct net_device *dev = NULL;
10251         struct bnx2x *bp;
10252         int rc;
10253
10254         if (version_printed++ == 0)
10255                 printk(KERN_INFO "%s", version);
10256
10257         /* dev zeroed in init_etherdev */
10258         dev = alloc_etherdev(sizeof(*bp));
10259         if (!dev) {
10260                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10261                 return -ENOMEM;
10262         }
10263
10264         bp = netdev_priv(dev);
10265         bp->msglevel = debug;
10266
10267         rc = bnx2x_init_dev(pdev, dev);
10268         if (rc < 0) {
10269                 free_netdev(dev);
10270                 return rc;
10271         }
10272
10273         rc = register_netdev(dev);
10274         if (rc) {
10275                 dev_err(&pdev->dev, "Cannot register net device\n");
10276                 goto init_one_exit;
10277         }
10278
10279         pci_set_drvdata(pdev, dev);
10280
10281         rc = bnx2x_init_bp(bp);
10282         if (rc) {
10283                 unregister_netdev(dev);
10284                 goto init_one_exit;
10285         }
10286
10287         netif_carrier_off(dev);
10288
10289         bp->common.name = board_info[ent->driver_data].name;
10290         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10291                " IRQ %d, ", dev->name, bp->common.name,
10292                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10293                bnx2x_get_pcie_width(bp),
10294                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10295                dev->base_addr, bp->pdev->irq);
10296         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10297         return 0;
10298
10299 init_one_exit:
10300         if (bp->regview)
10301                 iounmap(bp->regview);
10302
10303         if (bp->doorbells)
10304                 iounmap(bp->doorbells);
10305
10306         free_netdev(dev);
10307
10308         if (atomic_read(&pdev->enable_cnt) == 1)
10309                 pci_release_regions(pdev);
10310
10311         pci_disable_device(pdev);
10312         pci_set_drvdata(pdev, NULL);
10313
10314         return rc;
10315 }
10316
10317 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10318 {
10319         struct net_device *dev = pci_get_drvdata(pdev);
10320         struct bnx2x *bp;
10321
10322         if (!dev) {
10323                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10324                 return;
10325         }
10326         bp = netdev_priv(dev);
10327
10328         unregister_netdev(dev);
10329
10330         if (bp->regview)
10331                 iounmap(bp->regview);
10332
10333         if (bp->doorbells)
10334                 iounmap(bp->doorbells);
10335
10336         free_netdev(dev);
10337
10338         if (atomic_read(&pdev->enable_cnt) == 1)
10339                 pci_release_regions(pdev);
10340
10341         pci_disable_device(pdev);
10342         pci_set_drvdata(pdev, NULL);
10343 }
10344
10345 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10346 {
10347         struct net_device *dev = pci_get_drvdata(pdev);
10348         struct bnx2x *bp;
10349
10350         if (!dev) {
10351                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10352                 return -ENODEV;
10353         }
10354         bp = netdev_priv(dev);
10355
10356         rtnl_lock();
10357
10358         pci_save_state(pdev);
10359
10360         if (!netif_running(dev)) {
10361                 rtnl_unlock();
10362                 return 0;
10363         }
10364
10365         netif_device_detach(dev);
10366
10367         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10368
10369         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10370
10371         rtnl_unlock();
10372
10373         return 0;
10374 }
10375
10376 static int bnx2x_resume(struct pci_dev *pdev)
10377 {
10378         struct net_device *dev = pci_get_drvdata(pdev);
10379         struct bnx2x *bp;
10380         int rc;
10381
10382         if (!dev) {
10383                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10384                 return -ENODEV;
10385         }
10386         bp = netdev_priv(dev);
10387
10388         rtnl_lock();
10389
10390         pci_restore_state(pdev);
10391
10392         if (!netif_running(dev)) {
10393                 rtnl_unlock();
10394                 return 0;
10395         }
10396
10397         bnx2x_set_power_state(bp, PCI_D0);
10398         netif_device_attach(dev);
10399
10400         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10401
10402         rtnl_unlock();
10403
10404         return rc;
10405 }
10406
10407 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10408 {
10409         int i;
10410
10411         bp->state = BNX2X_STATE_ERROR;
10412
10413         bp->rx_mode = BNX2X_RX_MODE_NONE;
10414
10415         bnx2x_netif_stop(bp, 0);
10416
10417         del_timer_sync(&bp->timer);
10418         bp->stats_state = STATS_STATE_DISABLED;
10419         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10420
10421         /* Release IRQs */
10422         bnx2x_free_irq(bp);
10423
10424         if (CHIP_IS_E1(bp)) {
10425                 struct mac_configuration_cmd *config =
10426                                                 bnx2x_sp(bp, mcast_config);
10427
10428                 for (i = 0; i < config->hdr.length_6b; i++)
10429                         CAM_INVALIDATE(config->config_table[i]);
10430         }
10431
10432         /* Free SKBs, SGEs, TPA pool and driver internals */
10433         bnx2x_free_skbs(bp);
10434         for_each_queue(bp, i)
10435                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10436         bnx2x_free_mem(bp);
10437
10438         bp->state = BNX2X_STATE_CLOSED;
10439
10440         netif_carrier_off(bp->dev);
10441
10442         return 0;
10443 }
10444
10445 static void bnx2x_eeh_recover(struct bnx2x *bp)
10446 {
10447         u32 val;
10448
10449         mutex_init(&bp->port.phy_mutex);
10450
10451         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10452         bp->link_params.shmem_base = bp->common.shmem_base;
10453         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10454
10455         if (!bp->common.shmem_base ||
10456             (bp->common.shmem_base < 0xA0000) ||
10457             (bp->common.shmem_base >= 0xC0000)) {
10458                 BNX2X_DEV_INFO("MCP not active\n");
10459                 bp->flags |= NO_MCP_FLAG;
10460                 return;
10461         }
10462
10463         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10464         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10465                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10466                 BNX2X_ERR("BAD MCP validity signature\n");
10467
10468         if (!BP_NOMCP(bp)) {
10469                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10470                               & DRV_MSG_SEQ_NUMBER_MASK);
10471                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10472         }
10473 }
10474
10475 /**
10476  * bnx2x_io_error_detected - called when PCI error is detected
10477  * @pdev: Pointer to PCI device
10478  * @state: The current pci connection state
10479  *
10480  * This function is called after a PCI bus error affecting
10481  * this device has been detected.
10482  */
10483 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10484                                                 pci_channel_state_t state)
10485 {
10486         struct net_device *dev = pci_get_drvdata(pdev);
10487         struct bnx2x *bp = netdev_priv(dev);
10488
10489         rtnl_lock();
10490
10491         netif_device_detach(dev);
10492
10493         if (netif_running(dev))
10494                 bnx2x_eeh_nic_unload(bp);
10495
10496         pci_disable_device(pdev);
10497
10498         rtnl_unlock();
10499
10500         /* Request a slot reset */
10501         return PCI_ERS_RESULT_NEED_RESET;
10502 }
10503
10504 /**
10505  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10506  * @pdev: Pointer to PCI device
10507  *
10508  * Restart the card from scratch, as if from a cold-boot.
10509  */
10510 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10511 {
10512         struct net_device *dev = pci_get_drvdata(pdev);
10513         struct bnx2x *bp = netdev_priv(dev);
10514
10515         rtnl_lock();
10516
10517         if (pci_enable_device(pdev)) {
10518                 dev_err(&pdev->dev,
10519                         "Cannot re-enable PCI device after reset\n");
10520                 rtnl_unlock();
10521                 return PCI_ERS_RESULT_DISCONNECT;
10522         }
10523
10524         pci_set_master(pdev);
10525         pci_restore_state(pdev);
10526
10527         if (netif_running(dev))
10528                 bnx2x_set_power_state(bp, PCI_D0);
10529
10530         rtnl_unlock();
10531
10532         return PCI_ERS_RESULT_RECOVERED;
10533 }
10534
10535 /**
10536  * bnx2x_io_resume - called when traffic can start flowing again
10537  * @pdev: Pointer to PCI device
10538  *
10539  * This callback is called when the error recovery driver tells us that
10540  * it's OK to resume normal operation.
10541  */
10542 static void bnx2x_io_resume(struct pci_dev *pdev)
10543 {
10544         struct net_device *dev = pci_get_drvdata(pdev);
10545         struct bnx2x *bp = netdev_priv(dev);
10546
10547         rtnl_lock();
10548
10549         bnx2x_eeh_recover(bp);
10550
10551         if (netif_running(dev))
10552                 bnx2x_nic_load(bp, LOAD_NORMAL);
10553
10554         netif_device_attach(dev);
10555
10556         rtnl_unlock();
10557 }
10558
10559 static struct pci_error_handlers bnx2x_err_handler = {
10560         .error_detected = bnx2x_io_error_detected,
10561         .slot_reset = bnx2x_io_slot_reset,
10562         .resume = bnx2x_io_resume,
10563 };
10564
10565 static struct pci_driver bnx2x_pci_driver = {
10566         .name        = DRV_MODULE_NAME,
10567         .id_table    = bnx2x_pci_tbl,
10568         .probe       = bnx2x_init_one,
10569         .remove      = __devexit_p(bnx2x_remove_one),
10570         .suspend     = bnx2x_suspend,
10571         .resume      = bnx2x_resume,
10572         .err_handler = &bnx2x_err_handler,
10573 };
10574
10575 static int __init bnx2x_init(void)
10576 {
10577         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10578         if (bnx2x_wq == NULL) {
10579                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10580                 return -ENOMEM;
10581         }
10582
10583         return pci_register_driver(&bnx2x_pci_driver);
10584 }
10585
10586 static void __exit bnx2x_cleanup(void)
10587 {
10588         pci_unregister_driver(&bnx2x_pci_driver);
10589
10590         destroy_workqueue(bnx2x_wq);
10591 }
10592
10593 module_init(bnx2x_init);
10594 module_exit(bnx2x_cleanup);
10595