/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.6"
#define DRV_MODULE_RELDATE      "2008/06/23"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
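
/*
 * Note: the two helpers above tunnel register accesses through the
 * PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA config-space window: the target
 * GRC (global register controller) address is latched first, then the
 * data register is read or written.  Restoring the window to
 * PCICFG_VENDOR_ID_OFFSET afterwards keeps ordinary config-space
 * accesses from being misrouted through a stale window.
 */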

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
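
/*
 * The DMAE block exposes 16 command channels (see dmae_reg_go_c[]
 * above); bnx2x_post_dmae() copies the command image dword by dword
 * into the channel's slot in DMAE command memory and then writes 1 to
 * that channel's GO register to start the transfer.
 */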

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
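
/*
 * Completion handshake used by bnx2x_write_dmae() above and
 * bnx2x_read_dmae() below: the DMAE engine writes DMAE_COMP_VAL to the
 * slowpath wb_comp word (comp_addr) when the copy finishes, so the
 * driver clears wb_comp, posts the command and polls until the magic
 * value shows up, backing off with a longer delay on slow
 * emulation/FPGA platforms.
 */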

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
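
/*
 * Some GRC registers are 64 bits wide ("wide-bus" registers, hence the
 * wb_ prefix) and must be accessed as a unit; bnx2x_wb_wr() therefore
 * hands the {hi, lo} pair to the DMAE engine as a single two-dword
 * transfer instead of issuing two independent 32-bit register writes.
 */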

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
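
/*
 * The Everest data path is run by four on-chip "STORM" processors
 * (XSTORM, TSTORM, CSTORM and USTORM).  Each one keeps an assert list
 * in its internal memory; bnx2x_mc_assert() walks all four lists and
 * prints every entry whose first word is not the "invalid assert"
 * opcode, returning the total number of asserts found.
 */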

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}
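
/*
 * The MCP (management) firmware logs into a circular buffer in its
 * scratchpad RAM; "mark" is the current write pointer, so the dump
 * above prints the older half (from mark to the end of the buffer)
 * first and then the newer half (from the buffer start up to mark) to
 * reproduce the log in chronological order.
 */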

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = 0;
                end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}
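
/*
 * Interrupt quiesce pattern: bumping bp->intr_sem makes the ISRs bail
 * out early, bnx2x_int_disable() stops the HC from raising new
 * interrupts, synchronize_irq() waits for any handler already running
 * on another CPU, and cancel_work_sync() does the same for the
 * slowpath task.  After bnx2x_int_disable_sync() returns, no driver
 * interrupt code is executing.
 */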

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
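
/*
 * The IGU ack is a single 32-bit write that packs the status block id,
 * the storm id, the new consumer index, an "update index" flag and the
 * interrupt mode op into struct igu_ack_register.  A sketch of a
 * typical call (illustrative only; the exact macros used by the real
 * call sites later in this file may differ) re-enables interrupts for
 * a fastpath status block:
 *
 *      bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
 *                   le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */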

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
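
/*
 * A transmitted packet occupies a chain of buffer descriptors: the
 * first BD records the total BD count (nbd) and may be followed by a
 * parse BD (for checksum/LSO offload) and a TSO split-header BD,
 * neither of which carries a DMA mapping of its own.  That is why
 * bnx2x_free_tx_pkt() unmaps the first BD, skips those helper BDs,
 * then unmaps one page per remaining fragment.
 */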

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
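
/*
 * Example of the accounting above (illustrative numbers): with a
 * tx_ring_size of 1000, prod = 110 and cons = 100, ten BDs hold
 * unfinished packets; adding NUM_TX_RINGS reserves one slot per
 * "next-page" BD that can never carry data, so tx_avail reports
 * 990 - NUM_TX_RINGS free BDs rather than overstating the room.
 */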

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}
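
/*
 * The SGE mask is a bitmap with one bit per SGE ring entry, stored in
 * 64-bit elements.  Entries consumed by the FW are cleared as they are
 * reported, and the producer may only advance over mask elements that
 * have gone fully to zero, which is why bnx2x_update_sge_prod() walks
 * whole elements (RX_SGE_MASK_ELEM_SZ bits at a time) rather than
 * single entries.
 */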

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1's: it's faster to compare to 0 than to 0xf's */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}
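
/*
 * TPA (transparent packet aggregation, the HW LRO flavour) keeps one
 * pool skb per aggregation queue.  bnx2x_tpa_start() swaps that empty
 * skb onto the BD ring and parks the partially filled one in the pool,
 * moving the queue from BNX2X_TPA_STOP to BNX2X_TPA_START;
 * bnx2x_tpa_stop() below performs the reverse transition when the
 * aggregation completes.
 */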

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                bp->dev->last_rx = jiffies;

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}
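
/*
 * The RX producers are not written to chip registers directly: the
 * struct tstorm_eth_rx_producers image is copied dword by dword into
 * the TSTORM internal memory slot for this port/client, where the FW
 * picks up the new BD, CQE and SGE producer values.
 */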
1366
1367 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1368 {
1369         struct bnx2x *bp = fp->bp;
1370         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1371         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1372         int rx_pkt = 0;
1373         u16 queue;
1374
1375 #ifdef BNX2X_STOP_ON_ERROR
1376         if (unlikely(bp->panic))
1377                 return 0;
1378 #endif
1379
1380         /* CQ "next element" is of the size of the regular element,
1381            that's why it's ok here */
1382         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1383         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1384                 hw_comp_cons++;
1385
1386         bd_cons = fp->rx_bd_cons;
1387         bd_prod = fp->rx_bd_prod;
1388         bd_prod_fw = bd_prod;
1389         sw_comp_cons = fp->rx_comp_cons;
1390         sw_comp_prod = fp->rx_comp_prod;
1391
1392         /* Memory barrier necessary as speculative reads of the rx
1393          * buffer can be ahead of the index in the status block
1394          */
1395         rmb();
1396
1397         DP(NETIF_MSG_RX_STATUS,
1398            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1399            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1400
1401         while (sw_comp_cons != hw_comp_cons) {
1402                 struct sw_rx_bd *rx_buf = NULL;
1403                 struct sk_buff *skb;
1404                 union eth_rx_cqe *cqe;
1405                 u8 cqe_fp_flags;
1406                 u16 len, pad;
1407
1408                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1409                 bd_prod = RX_BD(bd_prod);
1410                 bd_cons = RX_BD(bd_cons);
1411
1412                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1413                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1414
1415                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1416                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1417                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1418                    cqe->fast_path_cqe.rss_hash_result,
1419                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1420                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1421
1422                 /* is this a slowpath msg? */
1423                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1424                         bnx2x_sp_event(fp, cqe);
1425                         goto next_cqe;
1426
1427                 /* this is an rx packet */
1428                 } else {
1429                         rx_buf = &fp->rx_buf_ring[bd_cons];
1430                         skb = rx_buf->skb;
1431                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1432                         pad = cqe->fast_path_cqe.placement_offset;
1433
1434                         /* If CQE is marked both TPA_START and TPA_END
1435                            it is a non-TPA CQE */
1436                         if ((!fp->disable_tpa) &&
1437                             (TPA_TYPE(cqe_fp_flags) !=
1438                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1439                                 queue = cqe->fast_path_cqe.queue_index;
1440
1441                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1442                                         DP(NETIF_MSG_RX_STATUS,
1443                                            "calling tpa_start on queue %d\n",
1444                                            queue);
1445
1446                                         bnx2x_tpa_start(fp, queue, skb,
1447                                                         bd_cons, bd_prod);
1448                                         goto next_rx;
1449                                 }
1450
1451                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1452                                         DP(NETIF_MSG_RX_STATUS,
1453                                            "calling tpa_stop on queue %d\n",
1454                                            queue);
1455
1456                                         if (!BNX2X_RX_SUM_FIX(cqe))
1457                                                 BNX2X_ERR("STOP on non-TCP "
1458                                                           "data\n");
1459
1460                                         /* This is the size of the linear data
1461                                            on this skb */
1462                                         len = le16_to_cpu(cqe->fast_path_cqe.
1463                                                                 len_on_bd);
1464                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1465                                                     len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467                                         if (bp->panic)
1468                                                 return -EINVAL;
1469 #endif
1470
1471                                         bnx2x_update_sge_prod(fp,
1472                                                         &cqe->fast_path_cqe);
1473                                         goto next_cqe;
1474                                 }
1475                         }
1476
1477                         pci_dma_sync_single_for_device(bp->pdev,
1478                                         pci_unmap_addr(rx_buf, mapping),
1479                                                        pad + RX_COPY_THRESH,
1480                                                        PCI_DMA_FROMDEVICE);
1481                         prefetch(skb);
1482                         prefetch(((char *)(skb)) + 128);
1483
1484                         /* is this an error packet? */
1485                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486                                 DP(NETIF_MSG_RX_ERR,
1487                                    "ERROR  flags %x  rx packet %u\n",
1488                                    cqe_fp_flags, sw_comp_cons);
1489                                 bp->eth_stats.rx_err_discard_pkt++;
1490                                 goto reuse_rx;
1491                         }
1492
1493                         /* Since we don't have a jumbo ring,
1494                          * copy small packets if mtu > 1500
1495                          */
1496                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497                             (len <= RX_COPY_THRESH)) {
1498                                 struct sk_buff *new_skb;
1499
1500                                 new_skb = netdev_alloc_skb(bp->dev,
1501                                                            len + pad);
1502                                 if (new_skb == NULL) {
1503                                         DP(NETIF_MSG_RX_ERR,
1504                                            "ERROR  packet dropped "
1505                                            "because of alloc failure\n");
1506                                         bp->eth_stats.rx_skb_alloc_failed++;
1507                                         goto reuse_rx;
1508                                 }
1509
1510                                 /* aligned copy */
1511                                 skb_copy_from_linear_data_offset(skb, pad,
1512                                                     new_skb->data + pad, len);
1513                                 skb_reserve(new_skb, pad);
1514                                 skb_put(new_skb, len);
1515
1516                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518                                 skb = new_skb;
1519
1520                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521                                 pci_unmap_single(bp->pdev,
1522                                         pci_unmap_addr(rx_buf, mapping),
1523                                                  bp->rx_buf_use_size,
1524                                                  PCI_DMA_FROMDEVICE);
1525                                 skb_reserve(skb, pad);
1526                                 skb_put(skb, len);
1527
1528                         } else {
1529                                 DP(NETIF_MSG_RX_ERR,
1530                                    "ERROR  packet dropped because "
1531                                    "of alloc failure\n");
1532                                 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535                                 goto next_rx;
1536                         }
1537
1538                         skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540                         skb->ip_summed = CHECKSUM_NONE;
1541                         if (bp->rx_csum) {
1542                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                                 else
1545                                         bp->eth_stats.hw_csum_err++;
1546                         }
1547                 }
1548
1549 #ifdef BCM_VLAN
1550                 if ((bp->vlgrp != NULL) &&
1551                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552                      PARSING_FLAGS_VLAN))
1553                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555                 else
1556 #endif
1557                         netif_receive_skb(skb);
1558
1559                 bp->dev->last_rx = jiffies;
1560
1561 next_rx:
1562                 rx_buf->skb = NULL;
1563
1564                 bd_cons = NEXT_RX_IDX(bd_cons);
1565                 bd_prod = NEXT_RX_IDX(bd_prod);
1566                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1567                 rx_pkt++;
1568 next_cqe:
1569                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1571
1572                 if (rx_pkt == budget)
1573                         break;
1574         } /* while */
1575
1576         fp->rx_bd_cons = bd_cons;
1577         fp->rx_bd_prod = bd_prod_fw;
1578         fp->rx_comp_cons = sw_comp_cons;
1579         fp->rx_comp_prod = sw_comp_prod;
1580
1581         /* Update producers */
1582         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1583                              fp->rx_sge_prod);
1584         mmiowb(); /* keep prod updates ordered */
1585
1586         fp->rx_pkt += rx_pkt;
1587         fp->rx_calls++;
1588
1589         return rx_pkt;
1590 }
1591
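/*
 * Per-queue MSI-X handler: acknowledge the status block with further
 * interrupts disabled (IGU_INT_DISABLE) and leave the real work to the
 * NAPI poll routine scheduled below.  The prefetches warm up the
 * indices that the poll routine will look at first.
 */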
1592 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1593 {
1594         struct bnx2x_fastpath *fp = fp_cookie;
1595         struct bnx2x *bp = fp->bp;
1596         struct net_device *dev = bp->dev;
1597         int index = FP_IDX(fp);
1598
1599         /* Return here if interrupt is disabled */
1600         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602                 return IRQ_HANDLED;
1603         }
1604
1605         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606            index, FP_SB_ID(fp));
1607         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1608
1609 #ifdef BNX2X_STOP_ON_ERROR
1610         if (unlikely(bp->panic))
1611                 return IRQ_HANDLED;
1612 #endif
1613
1614         prefetch(fp->rx_cons_sb);
1615         prefetch(fp->tx_cons_sb);
1616         prefetch(&fp->status_blk->c_status_block.status_block_index);
1617         prefetch(&fp->status_blk->u_status_block.status_block_index);
1618
1619         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1620
1621         return IRQ_HANDLED;
1622 }
1623
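/*
 * INT#A handler.  The acked status is a bit mask: bit 0 signals a
 * slowpath event (handed off to sp_task), while bit (sb_id + 1)
 * signals a fastpath status block - hence the "0x2 << sb_id" test
 * below (sb_id 0 maps to mask 0x2).
 */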
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1625 {
1626         struct net_device *dev = dev_instance;
1627         struct bnx2x *bp = netdev_priv(dev);
1628         u16 status = bnx2x_ack_int(bp);
1629         u16 mask;
1630
1631         /* Return here if interrupt is shared and it's not for us */
1632         if (unlikely(status == 0)) {
1633                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634                 return IRQ_NONE;
1635         }
1636         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1637
1638 #ifdef BNX2X_STOP_ON_ERROR
1639         if (unlikely(bp->panic))
1640                 return IRQ_HANDLED;
1641 #endif
1642
1643         /* Return here if interrupt is disabled */
1644         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1645                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1646                 return IRQ_HANDLED;
1647         }
1648
1649         mask = 0x2 << bp->fp[0].sb_id;
1650         if (status & mask) {
1651                 struct bnx2x_fastpath *fp = &bp->fp[0];
1652
1653                 prefetch(fp->rx_cons_sb);
1654                 prefetch(fp->tx_cons_sb);
1655                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1659
1660                 status &= ~mask;
1661         }
1662
1664         if (unlikely(status & 0x1)) {
1665                 schedule_work(&bp->sp_task);
1666
1667                 status &= ~0x1;
1668                 if (!status)
1669                         return IRQ_HANDLED;
1670         }
1671
1672         if (status)
1673                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674                    status);
1675
1676         return IRQ_HANDLED;
1677 }
1678
1679 /* end of fast path */
1680
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1682
1683 /* Link */
1684
1685 /*
1686  * General service functions
1687  */
1688
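/*
 * The HW resource locks are exposed as a pair of registers per
 * function: writing a resource bit at offset +4 tries to take the
 * lock, writing the same bit at the base offset releases it, and
 * reading the base offset shows which locks are currently held.
 * The acquire below polls for up to 1 second (200 tries x 5 ms).
 */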
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1690 {
1691         u32 lock_status;
1692         u32 resource_bit = (1 << resource);
1693         int func = BP_FUNC(bp);
1694         u32 hw_lock_control_reg;
1695         int cnt;
1696
1697         /* Validating that the resource is within range */
1698         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699                 DP(NETIF_MSG_HW,
1700                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702                 return -EINVAL;
1703         }
1704
1705         if (func <= 5) {
1706                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707         } else {
1708                 hw_lock_control_reg =
1709                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710         }
1711
1712         /* Validating that the resource is not already taken */
1713         lock_status = REG_RD(bp, hw_lock_control_reg);
1714         if (lock_status & resource_bit) {
1715                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1716                    lock_status, resource_bit);
1717                 return -EEXIST;
1718         }
1719
1720         /* Try for 1 second every 5ms */
1721         for (cnt = 0; cnt < 200; cnt++) {
1722                 /* Try to acquire the lock */
1723                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724                 lock_status = REG_RD(bp, hw_lock_control_reg);
1725                 if (lock_status & resource_bit)
1726                         return 0;
1727
1728                 msleep(5);
1729         }
1730         DP(NETIF_MSG_HW, "Timeout\n");
1731         return -EAGAIN;
1732 }
1733
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1735 {
1736         u32 lock_status;
1737         u32 resource_bit = (1 << resource);
1738         int func = BP_FUNC(bp);
1739         u32 hw_lock_control_reg;
1740
1741         /* Validating that the resource is within range */
1742         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743                 DP(NETIF_MSG_HW,
1744                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746                 return -EINVAL;
1747         }
1748
1749         if (func <= 5) {
1750                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751         } else {
1752                 hw_lock_control_reg =
1753                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754         }
1755
1756         /* Validating that the resource is currently taken */
1757         lock_status = REG_RD(bp, hw_lock_control_reg);
1758         if (!(lock_status & resource_bit)) {
1759                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1760                    lock_status, resource_bit);
1761                 return -EFAULT;
1762         }
1763
1764         REG_WR(bp, hw_lock_control_reg, resource_bit);
1765         return 0;
1766 }
1767
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1770 {
1771         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1772
1773         mutex_lock(&bp->port.phy_mutex);
1774
1775         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1778 }
1779
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1781 {
1782         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1783
1784         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1787
1788         mutex_unlock(&bp->port.phy_mutex);
1789 }
1790
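/*
 * Each GPIO pin has three writable control bits in MISC_REG_GPIO, at
 * the FLOAT, SET and CLR positions relative to the pin's shift.  The
 * read-modify-write below keeps only the float bits and then applies
 * the combination matching the requested mode (output low/high or
 * high-impedance input), all under the GPIO HW lock.
 */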
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1792 {
1793         /* The GPIO should be swapped if swap register is set and active */
1794         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
1796         int gpio_shift = gpio_num +
1797                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798         u32 gpio_mask = (1 << gpio_shift);
1799         u32 gpio_reg;
1800
1801         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803                 return -EINVAL;
1804         }
1805
1806         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807         /* read GPIO, keeping only the float bits */
1808         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1809
1810         switch (mode) {
1811         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813                    gpio_num, gpio_shift);
1814                 /* clear FLOAT and set CLR */
1815                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817                 break;
1818
1819         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821                    gpio_num, gpio_shift);
1822                 /* clear FLOAT and set SET */
1823                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825                 break;
1826
1827         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829                    gpio_num, gpio_shift);
1830                 /* set FLOAT */
1831                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832                 break;
1833
1834         default:
1835                 break;
1836         }
1837
1838         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1840
1841         return 0;
1842 }
1843
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1845 {
1846         u32 spio_mask = (1 << spio_num);
1847         u32 spio_reg;
1848
1849         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850             (spio_num > MISC_REGISTERS_SPIO_7)) {
1851                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852                 return -EINVAL;
1853         }
1854
1855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856         /* read SPIO, keeping only the float bits */
1857         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1858
1859         switch (mode) {
1860         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862                 /* clear FLOAT and set CLR */
1863                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865                 break;
1866
1867         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869                 /* clear FLOAT and set SET */
1870                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872                 break;
1873
1874         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876                 /* set FLOAT */
1877                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878                 break;
1879
1880         default:
1881                 break;
1882         }
1883
1884         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1886
1887         return 0;
1888 }
1889
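/*
 * Map the negotiated IEEE pause mode onto the ethtool-style
 * advertising bits: NONE clears both bits, BOTH sets Pause and
 * Asym_Pause, ASYMMETRIC sets Asym_Pause only.
 */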
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1891 {
1892         switch (bp->link_vars.ieee_fc) {
1893         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1895                                           ADVERTISED_Pause);
1896                 break;
1897         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1899                                          ADVERTISED_Pause);
1900                 break;
1901         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1903                 break;
1904         default:
1905                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906                                           ADVERTISED_Pause);
1907                 break;
1908         }
1909 }
1910
1911 static void bnx2x_link_report(struct bnx2x *bp)
1912 {
1913         if (bp->link_vars.link_up) {
1914                 if (bp->state == BNX2X_STATE_OPEN)
1915                         netif_carrier_on(bp->dev);
1916                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1917
1918                 printk("%d Mbps ", bp->link_vars.line_speed);
1919
1920                 if (bp->link_vars.duplex == DUPLEX_FULL)
1921                         printk("full duplex");
1922                 else
1923                         printk("half duplex");
1924
1925                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927                                 printk(", receive ");
1928                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929                                         printk("& transmit ");
1930                         } else {
1931                                 printk(", transmit ");
1932                         }
1933                         printk("flow control ON");
1934                 }
1935                 printk("\n");
1936
1937         } else { /* link_down */
1938                 netif_carrier_off(bp->dev);
1939                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1940         }
1941 }
1942
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944 {
1945         if (!BP_NOMCP(bp)) {
1946                 u8 rc;
1947
1948                 /* Initialize link parameters structure variables */
1949                 bp->link_params.mtu = bp->dev->mtu;
1950
1951                 bnx2x_acquire_phy_lock(bp);
1952                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1953                 bnx2x_release_phy_lock(bp);
1954
1955                 if (bp->link_vars.link_up)
1956                         bnx2x_link_report(bp);
1957
1958                 bnx2x_calc_fc_adv(bp);
1959
1960                 return rc;
1961         }
1962         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1963         return -EINVAL;
1964 }
1965
1966 static void bnx2x_link_set(struct bnx2x *bp)
1967 {
1968         if (!BP_NOMCP(bp)) {
1969                 bnx2x_acquire_phy_lock(bp);
1970                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1971                 bnx2x_release_phy_lock(bp);
1972
1973                 bnx2x_calc_fc_adv(bp);
1974         } else
1975                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1976 }
1977
1978 static void bnx2x__link_reset(struct bnx2x *bp)
1979 {
1980         if (!BP_NOMCP(bp)) {
1981                 bnx2x_acquire_phy_lock(bp);
1982                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1983                 bnx2x_release_phy_lock(bp);
1984         } else
1985                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1986 }
1987
1988 static u8 bnx2x_link_test(struct bnx2x *bp)
1989 {
1990         u8 rc;
1991
1992         bnx2x_acquire_phy_lock(bp);
1993         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1994         bnx2x_release_phy_lock(bp);
1995
1996         return rc;
1997 }
1998
1999 /* Calculates the sum of vn_min_rates.
2000    It's needed for further normalizing of the min_rates.
2001
2002    Returns:
2003      sum of vn_min_rates
2004        or
2005      0 - if all the min_rates are 0.
2006      In the latter case the fairness algorithm should be deactivated.
2007      If not all min_rates are zero then those that are zeroes will
2008      be set to 1.
2009  */
2010 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2011 {
2012         int i, port = BP_PORT(bp);
2013         u32 wsum = 0;
2014         int all_zero = 1;
2015
2016         for (i = 0; i < E1HVN_MAX; i++) {
2017                 u32 vn_cfg =
2018                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2019                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2020                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2021                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2022                         /* If min rate is zero - set it to 1 */
2023                         if (!vn_min_rate)
2024                                 vn_min_rate = DEF_MIN_RATE;
2025                         else
2026                                 all_zero = 0;
2027
2028                         wsum += vn_min_rate;
2029                 }
2030         }
2031
2032         /* ... only if all min rates are zeros - disable FAIRNESS */
2033         if (all_zero)
2034                 return 0;
2035
2036         return wsum;
2037 }
2038
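/*
 * A worked example of the rate-shaping arithmetic below, assuming a
 * 10G port (port_rate = 10000 Mbps, so r_param = 1250 bytes/usec) and
 * the 100 usec periodic timeout quoted in the comments:
 *
 *   rs_periodic_timeout = 100 / 4          = 25 SDM ticks
 *   rs_threshold        = 100 * 1250 * 5/4 = 156250 bytes
 *   t_fair              = T_FAIR_COEF / 10000 = 1000 usec
 */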
2039 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2040                                    int en_fness,
2041                                    u16 port_rate,
2042                                    struct cmng_struct_per_port *m_cmng_port)
2043 {
2044         u32 r_param = port_rate / 8;
2045         int port = BP_PORT(bp);
2046         int i;
2047
2048         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2049
2050         /* Enable minmax only if we are in e1hmf mode */
2051         if (IS_E1HMF(bp)) {
2052                 u32 fair_periodic_timeout_usec;
2053                 u32 t_fair;
2054
2055                 /* Enable rate shaping and fairness */
2056                 m_cmng_port->flags.cmng_vn_enable = 1;
2057                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2058                 m_cmng_port->flags.rate_shaping_enable = 1;
2059
2060                 if (!en_fness)
2061                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2062                            "  fairness will be disabled\n");
2063
2064                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2065                 m_cmng_port->rs_vars.rs_periodic_timeout =
2066                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2067
2068                 /* this is the threshold below which no timer arming will occur;
2069                    the 1.25 coefficient makes the threshold a little bigger
2070                    than the real time, to compensate for timer inaccuracy */
2071                 m_cmng_port->rs_vars.rs_threshold =
2072                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2073
2074                 /* resolution of fairness timer */
2075                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2076                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2077                 t_fair = T_FAIR_COEF / port_rate;
2078
2079                 /* this is the threshold below which we won't arm
2080                    the timer anymore */
2081                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2082
2083                 /* we multiply by 1e3/8 to get bytes/msec.
2084                    We don't want the credits to exceed
2085                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2086                 m_cmng_port->fair_vars.upper_bound =
2087                                                 r_param * t_fair * FAIR_MEM;
2088                 /* since each tick is 4 usec */
2089                 m_cmng_port->fair_vars.fairness_timeout =
2090                                                 fair_periodic_timeout_usec / 4;
2091
2092         } else {
2093                 /* Disable rate shaping and fairness */
2094                 m_cmng_port->flags.cmng_vn_enable = 0;
2095                 m_cmng_port->flags.fairness_enable = 0;
2096                 m_cmng_port->flags.rate_shaping_enable = 0;
2097
2098                 DP(NETIF_MSG_IFUP,
2099                    "Single function mode  minmax will be disabled\n");
2100         }
2101
2102         /* Store it to internal memory */
2103         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2104                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2105                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2106                        ((u32 *)(m_cmng_port))[i]);
2107 }
2108
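/*
 * Per-VN rate shaping and fairness setup.  Illustrative numbers for
 * the credit computation below: with two active VNs whose min BW
 * fields are 25 and 75 (vn_min_rate 2500 and 7500, wsum 10000), the
 * fairness credits come out in a 1:3 ratio, since vn_credit_delta
 * scales with vn_min_rate * T_FAIR_COEF / (8 * wsum), floored at
 * twice the port fair_threshold.
 */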
2109 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2110                                    u32 wsum, u16 port_rate,
2111                                  struct cmng_struct_per_port *m_cmng_port)
2112 {
2113         struct rate_shaping_vars_per_vn m_rs_vn;
2114         struct fairness_vars_per_vn m_fair_vn;
2115         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2116         u16 vn_min_rate, vn_max_rate;
2117         int i;
2118
2119         /* If function is hidden - set min and max to zeroes */
2120         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2121                 vn_min_rate = 0;
2122                 vn_max_rate = 0;
2123
2124         } else {
2125                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2126                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2127                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2128                    if current min rate is zero - set it to 1.
2129                    This is a requirement of the algorithm. */
2130                 if ((vn_min_rate == 0) && wsum)
2131                         vn_min_rate = DEF_MIN_RATE;
2132                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2133                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2134         }
2135
2136         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2137            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2138
2139         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2140         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2141
2142         /* global vn counter - maximal Mbps for this vn */
2143         m_rs_vn.vn_counter.rate = vn_max_rate;
2144
2145         /* quota - number of bytes transmitted in this period */
2146         m_rs_vn.vn_counter.quota =
2147                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2148
2149 #ifdef BNX2X_PER_PROT_QOS
2150         /* per protocol counter */
2151         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2152                 /* maximal Mbps for this protocol */
2153                 m_rs_vn.protocol_counters[protocol].rate =
2154                                                 protocol_max_rate[protocol];
2155                 /* the quota in each timer period -
2156                    number of bytes transmitted in this period */
2157                 m_rs_vn.protocol_counters[protocol].quota =
2158                         (u32)(rs_periodic_timeout_usec *
2159                           ((double)m_rs_vn.
2160                                    protocol_counters[protocol].rate/8));
2161         }
2162 #endif
2163
2164         if (wsum) {
2165                 /* credit for each period of the fairness algorithm:
2166                    number of bytes in T_FAIR (the vn share the port rate).
2167                    wsum should not be larger than 10000, thus
2168                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2169                 m_fair_vn.vn_credit_delta =
2170                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2171                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2172                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2173                    m_fair_vn.vn_credit_delta);
2174         }
2175
2176 #ifdef BNX2X_PER_PROT_QOS
2177         do {
2178                 u32 protocolWeightSum = 0;
2179
2180                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2181                         protocolWeightSum +=
2182                                         drvInit.protocol_min_rate[protocol];
2183                 /* per protocol counter -
2184                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2185                 if (protocolWeightSum > 0) {
2186                         for (protocol = 0;
2187                              protocol < NUM_OF_PROTOCOLS; protocol++)
2188                                 /* credit for each period of the
2189                                    fairness algorithm - number of bytes in
2190                                    T_FAIR (the protocol share the vn rate) */
2191                                 m_fair_vn.protocol_credit_delta[protocol] =
2192                                         (u32)((vn_min_rate / 8) * t_fair *
2193                                         protocol_min_rate / protocolWeightSum);
2194                 }
2195         } while (0);
2196 #endif
2197
2198         /* Store it to internal memory */
2199         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2200                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2201                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2202                        ((u32 *)(&m_rs_vn))[i]);
2203
2204         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2205                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2206                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2207                        ((u32 *)(&m_fair_vn))[i]);
2208 }
2209
2210 /* This function is called upon link interrupt */
2211 static void bnx2x_link_attn(struct bnx2x *bp)
2212 {
2213         int vn;
2214
2215         /* Make sure that we are synced with the current statistics */
2216         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2217
2218         bnx2x_acquire_phy_lock(bp);
2219         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2220         bnx2x_release_phy_lock(bp);
2221
2222         if (bp->link_vars.link_up) {
2223
2224                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2225                         struct host_port_stats *pstats;
2226
2227                         pstats = bnx2x_sp(bp, port_stats);
2228                         /* reset old bmac stats */
2229                         memset(&(pstats->mac_stx[0]), 0,
2230                                sizeof(struct mac_stx));
2231                 }
2232                 if ((bp->state == BNX2X_STATE_OPEN) ||
2233                     (bp->state == BNX2X_STATE_DISABLED))
2234                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2235         }
2236
2237         /* indicate link status */
2238         bnx2x_link_report(bp);
2239
2240         if (IS_E1HMF(bp)) {
2241                 int func;
2242
2243                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2244                         if (vn == BP_E1HVN(bp))
2245                                 continue;
2246
2247                         func = ((vn << 1) | BP_PORT(bp));
2248
2249                         /* Set the attention towards other drivers
2250                            on the same port */
2251                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2252                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2253                 }
2254         }
2255
2256         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2257                 struct cmng_struct_per_port m_cmng_port;
2258                 u32 wsum;
2259                 int port = BP_PORT(bp);
2260
2261                 /* Init RATE SHAPING and FAIRNESS contexts */
2262                 wsum = bnx2x_calc_vn_wsum(bp);
2263                 bnx2x_init_port_minmax(bp, (int)wsum,
2264                                         bp->link_vars.line_speed,
2265                                         &m_cmng_port);
2266                 if (IS_E1HMF(bp))
2267                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2268                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2269                                         wsum, bp->link_vars.line_speed,
2270                                                      &m_cmng_port);
2271         }
2272 }
2273
2274 static void bnx2x__link_status_update(struct bnx2x *bp)
2275 {
2276         if (bp->state != BNX2X_STATE_OPEN)
2277                 return;
2278
2279         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2280
2281         if (bp->link_vars.link_up)
2282                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2283         else
2284                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2285
2286         /* indicate link status */
2287         bnx2x_link_report(bp);
2288 }
2289
2290 static void bnx2x_pmf_update(struct bnx2x *bp)
2291 {
2292         int port = BP_PORT(bp);
2293         u32 val;
2294
2295         bp->port.pmf = 1;
2296         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2297
2298         /* enable nig attention */
2299         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2302
2303         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2304 }
2305
2306 /* end of Link */
2307
2308 /* slow path */
2309
2310 /*
2311  * General service functions
2312  */
2313
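/*
 * Post a single slowpath element (SPE).  The SPQ is a plain ring:
 * the producer BD advances until spq_last_bd and then wraps back to
 * the ring base, and the producer index is published to XSTORM
 * internal memory so the chip picks the element up.  spq_left counts
 * the free ring slots and is protected by spq_lock.
 */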
2314 /* the slow path queue is odd since completions arrive on the fastpath ring */
2315 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316                          u32 data_hi, u32 data_lo, int common)
2317 {
2318         int func = BP_FUNC(bp);
2319
2320         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2322            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2325
2326 #ifdef BNX2X_STOP_ON_ERROR
2327         if (unlikely(bp->panic))
2328                 return -EIO;
2329 #endif
2330
2331         spin_lock_bh(&bp->spq_lock);
2332
2333         if (!bp->spq_left) {
2334                 BNX2X_ERR("BUG! SPQ ring full!\n");
2335                 spin_unlock_bh(&bp->spq_lock);
2336                 bnx2x_panic();
2337                 return -EBUSY;
2338         }
2339
2340         /* CID needs the port number to be encoded in it */
2341         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2343                                      HW_CID(bp, cid)));
2344         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2345         if (common)
2346                 bp->spq_prod_bd->hdr.type |=
2347                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2348
2349         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2351
2352         bp->spq_left--;
2353
2354         if (bp->spq_prod_bd == bp->spq_last_bd) {
2355                 bp->spq_prod_bd = bp->spq;
2356                 bp->spq_prod_idx = 0;
2357                 DP(NETIF_MSG_TIMER, "end of spq\n");
2358
2359         } else {
2360                 bp->spq_prod_bd++;
2361                 bp->spq_prod_idx++;
2362         }
2363
2364         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2365                bp->spq_prod_idx);
2366
2367         spin_unlock_bh(&bp->spq_lock);
2368         return 0;
2369 }
2370
2371 /* acquire split MCP access lock register */
2372 static int bnx2x_acquire_alr(struct bnx2x *bp)
2373 {
2374         u32 i, j, val;
2375         int rc = 0;
2376
2377         might_sleep();
2378         i = 100;
2379         for (j = 0; j < i*10; j++) {
2380                 val = (1UL << 31);
2381                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2383                 if (val & (1L << 31))
2384                         break;
2385
2386                 msleep(5);
2387         }
2388         if (!(val & (1L << 31))) {
2389                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2390                 rc = -EBUSY;
2391         }
2392
2393         return rc;
2394 }
2395
2396 /* release split MCP access lock register */
2397 static void bnx2x_release_alr(struct bnx2x *bp)
2398 {
2399         u32 val = 0;
2400
2401         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2402 }
2403
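/*
 * Compare the cached default-status-block indices against what the
 * chip last wrote and report what changed as a bit mask:
 *   bit 0 - attention bits,  bit 1 - CSTORM,  bit 2 - USTORM,
 *   bit 3 - XSTORM,          bit 4 - TSTORM.
 * bnx2x_sp_task() below keys off bits 0 and 1.
 */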
2404 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2405 {
2406         struct host_def_status_block *def_sb = bp->def_status_blk;
2407         u16 rc = 0;
2408
2409         barrier(); /* status block is written to by the chip */
2410         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2411                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2412                 rc |= 1;
2413         }
2414         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2415                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2416                 rc |= 2;
2417         }
2418         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2419                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2420                 rc |= 4;
2421         }
2422         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2423                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2424                 rc |= 8;
2425         }
2426         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2427                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2428                 rc |= 16;
2429         }
2430         return rc;
2431 }
2432
2433 /*
2434  * slow path service functions
2435  */
2436
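/*
 * Newly asserted attention bits are masked out of the AEU enable
 * register (under the per-port HW lock), recorded in bp->attn_state,
 * handled, and finally written to the HC "attn bits set" command
 * register.  The NIG interrupt mask is saved and restored around
 * bnx2x_link_attn() so the link attention cannot re-fire while it is
 * being serviced.
 */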
2437 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2438 {
2439         int port = BP_PORT(bp);
2440         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2441                        COMMAND_REG_ATTN_BITS_SET);
2442         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2443                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2444         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2445                                        NIG_REG_MASK_INTERRUPT_PORT0;
2446         u32 aeu_mask;
2447
2448         if (bp->attn_state & asserted)
2449                 BNX2X_ERR("IGU ERROR\n");
2450
2451         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2452         aeu_mask = REG_RD(bp, aeu_addr);
2453
2454         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2455            aeu_mask, asserted);
2456         aeu_mask &= ~(asserted & 0xff);
2457         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2458
2459         REG_WR(bp, aeu_addr, aeu_mask);
2460         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2461
2462         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2463         bp->attn_state |= asserted;
2464         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2465
2466         if (asserted & ATTN_HARD_WIRED_MASK) {
2467                 if (asserted & ATTN_NIG_FOR_FUNC) {
2468
2469                         /* save nig interrupt mask */
2470                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2471                         REG_WR(bp, nig_int_mask_addr, 0);
2472
2473                         bnx2x_link_attn(bp);
2474
2475                         /* handle unicore attn? */
2476                 }
2477                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2478                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2479
2480                 if (asserted & GPIO_2_FUNC)
2481                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2482
2483                 if (asserted & GPIO_3_FUNC)
2484                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2485
2486                 if (asserted & GPIO_4_FUNC)
2487                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2488
2489                 if (port == 0) {
2490                         if (asserted & ATTN_GENERAL_ATTN_1) {
2491                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2492                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2493                         }
2494                         if (asserted & ATTN_GENERAL_ATTN_2) {
2495                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2496                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2497                         }
2498                         if (asserted & ATTN_GENERAL_ATTN_3) {
2499                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2500                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2501                         }
2502                 } else {
2503                         if (asserted & ATTN_GENERAL_ATTN_4) {
2504                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2505                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2506                         }
2507                         if (asserted & ATTN_GENERAL_ATTN_5) {
2508                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2509                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2510                         }
2511                         if (asserted & ATTN_GENERAL_ATTN_6) {
2512                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2513                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2514                         }
2515                 }
2516
2517         } /* if hardwired */
2518
2519         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2520            asserted, hc_addr);
2521         REG_WR(bp, hc_addr, asserted);
2522
2523         /* now set back the mask */
2524         if (asserted & ATTN_NIG_FOR_FUNC)
2525                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2526 }
2527
2528 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2529 {
2530         int port = BP_PORT(bp);
2531         int reg_offset;
2532         u32 val;
2533
2534         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2535                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2536
2537         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2538
2539                 val = REG_RD(bp, reg_offset);
2540                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2541                 REG_WR(bp, reg_offset, val);
2542
2543                 BNX2X_ERR("SPIO5 hw attention\n");
2544
2545                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2546                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2547                         /* Fan failure attention */
2548
2549                         /* The PHY reset is controlled by GPIO 1 */
2550                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2551                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2552                         /* Low power mode is controlled by GPIO 2 */
2553                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2554                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2555                         /* mark the failure */
2556                         bp->link_params.ext_phy_config &=
2557                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2558                         bp->link_params.ext_phy_config |=
2559                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2560                         SHMEM_WR(bp,
2561                                  dev_info.port_hw_config[port].
2562                                                         external_phy_config,
2563                                  bp->link_params.ext_phy_config);
2564                         /* log the failure */
2565                         printk(KERN_ERR PFX "Fan Failure on Network"
2566                                " Controller %s has caused the driver to"
2567                                " shut down the card to prevent permanent"
2568                                " damage.  Please contact Dell Support for"
2569                                " assistance\n", bp->dev->name);
2570                         break;
2571
2572                 default:
2573                         break;
2574                 }
2575         }
2576
2577         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2578
2579                 val = REG_RD(bp, reg_offset);
2580                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2581                 REG_WR(bp, reg_offset, val);
2582
2583                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2584                           (attn & HW_INTERRUT_ASSERT_SET_0));
2585                 bnx2x_panic();
2586         }
2587 }
2588
2589 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2590 {
2591         u32 val;
2592
2593         if (attn & BNX2X_DOORQ_ASSERT) {
2594
2595                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2596                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2597                 /* DORQ discard attention */
2598                 if (val & 0x2)
2599                         BNX2X_ERR("FATAL error from DORQ\n");
2600         }
2601
2602         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2603
2604                 int port = BP_PORT(bp);
2605                 int reg_offset;
2606
2607                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2608                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2609
2610                 val = REG_RD(bp, reg_offset);
2611                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2612                 REG_WR(bp, reg_offset, val);
2613
2614                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2615                           (attn & HW_INTERRUT_ASSERT_SET_1));
2616                 bnx2x_panic();
2617         }
2618 }
2619
2620 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2621 {
2622         u32 val;
2623
2624         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2625
2626                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2627                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2628                 /* CFC error attention */
2629                 if (val & 0x2)
2630                         BNX2X_ERR("FATAL error from CFC\n");
2631         }
2632
2633         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2634
2635                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2636                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2637                 /* RQ_USDMDP_FIFO_OVERFLOW */
2638                 if (val & 0x18000)
2639                         BNX2X_ERR("FATAL error from PXP\n");
2640         }
2641
2642         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2643
2644                 int port = BP_PORT(bp);
2645                 int reg_offset;
2646
2647                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2648                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2649
2650                 val = REG_RD(bp, reg_offset);
2651                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2652                 REG_WR(bp, reg_offset, val);
2653
2654                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2655                           (attn & HW_INTERRUT_ASSERT_SET_2));
2656                 bnx2x_panic();
2657         }
2658 }
2659
2660 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2661 {
2662         u32 val;
2663
2664         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2665
2666                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2667                         int func = BP_FUNC(bp);
2668
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2670                         bnx2x__link_status_update(bp);
2671                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2672                                                         DRV_STATUS_PMF)
2673                                 bnx2x_pmf_update(bp);
2674
2675                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2676
2677                         BNX2X_ERR("MC assert!\n");
2678                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2679                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2680                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2681                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2682                         bnx2x_panic();
2683
2684                 } else if (attn & BNX2X_MCP_ASSERT) {
2685
2686                         BNX2X_ERR("MCP assert!\n");
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2688                         bnx2x_fw_dump(bp);
2689
2690                 } else
2691                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2692         }
2693
2694         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2695                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2696                 if (attn & BNX2X_GRC_TIMEOUT) {
2697                         val = CHIP_IS_E1H(bp) ?
2698                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2699                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2700                 }
2701                 if (attn & BNX2X_GRC_RSV) {
2702                         val = CHIP_IS_E1H(bp) ?
2703                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2704                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2705                 }
2706                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2707         }
2708 }
2709
2710 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2711 {
2712         struct attn_route attn;
2713         struct attn_route group_mask;
2714         int port = BP_PORT(bp);
2715         int index;
2716         u32 reg_addr;
2717         u32 val;
2718         u32 aeu_mask;
2719
2720         /* need to take HW lock because MCP or other port might also
2721            try to handle this event */
2722         bnx2x_acquire_alr(bp);
2723
2724         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2725         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2726         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2727         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2728         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2729            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2730
2731         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2732                 if (deasserted & (1 << index)) {
2733                         group_mask = bp->attn_group[index];
2734
2735                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2736                            index, group_mask.sig[0], group_mask.sig[1],
2737                            group_mask.sig[2], group_mask.sig[3]);
2738
2739                         bnx2x_attn_int_deasserted3(bp,
2740                                         attn.sig[3] & group_mask.sig[3]);
2741                         bnx2x_attn_int_deasserted1(bp,
2742                                         attn.sig[1] & group_mask.sig[1]);
2743                         bnx2x_attn_int_deasserted2(bp,
2744                                         attn.sig[2] & group_mask.sig[2]);
2745                         bnx2x_attn_int_deasserted0(bp,
2746                                         attn.sig[0] & group_mask.sig[0]);
2747
2748                         if ((attn.sig[0] & group_mask.sig[0] &
2749                                                 HW_PRTY_ASSERT_SET_0) ||
2750                             (attn.sig[1] & group_mask.sig[1] &
2751                                                 HW_PRTY_ASSERT_SET_1) ||
2752                             (attn.sig[2] & group_mask.sig[2] &
2753                                                 HW_PRTY_ASSERT_SET_2))
2754                                BNX2X_ERR("FATAL HW block parity attention\n");
2755                 }
2756         }
2757
2758         bnx2x_release_alr(bp);
2759
2760         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2761
2762         val = ~deasserted;
2763         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2764            val, reg_addr);
2765         REG_WR(bp, reg_addr, val);
2766
2767         if (~bp->attn_state & deasserted)
2768                 BNX2X_ERR("IGU ERROR\n");
2769
2770         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2771                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2772
2773         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774         aeu_mask = REG_RD(bp, reg_addr);
2775
2776         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2777            aeu_mask, deasserted);
2778         aeu_mask |= (deasserted & 0xff);
2779         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2780
2781         REG_WR(bp, reg_addr, aeu_mask);
2782         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2783
2784         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2785         bp->attn_state &= ~deasserted;
2786         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2787 }
2788
2789 static void bnx2x_attn_int(struct bnx2x *bp)
2790 {
2791         /* read local copy of bits */
2792         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2793         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2794         u32 attn_state = bp->attn_state;
2795
2796         /* look for changed bits */
2797         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2798         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
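        /* Worked example with made-up values: attn_bits = 0x6,
         * attn_ack = 0x2, attn_state = 0x2:
         *   asserted   =  0x6 & ~0x2 & ~0x2 = 0x4  (bit 2 newly raised)
         *   deasserted = ~0x6 &  0x2 &  0x2 = 0x0  (nothing lowered)
         * a bit asserts only when set in attn_bits but not yet acked or
         * tracked in attn_state; deasserted is the exact mirror case
         */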
2799
2800         DP(NETIF_MSG_HW,
2801            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2802            attn_bits, attn_ack, asserted, deasserted);
2803
2804         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2805                 BNX2X_ERR("BAD attention state\n");
2806
2807         /* handle bits that were raised */
2808         if (asserted)
2809                 bnx2x_attn_int_asserted(bp, asserted);
2810
2811         if (deasserted)
2812                 bnx2x_attn_int_deasserted(bp, deasserted);
2813 }
2814
2815 static void bnx2x_sp_task(struct work_struct *work)
2816 {
2817         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2818         u16 status;
2819
2821         /* Return here if interrupt is disabled */
2822         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2823                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2824                 return;
2825         }
2826
2827         status = bnx2x_update_dsb_idx(bp);
2828 /*      if (status == 0)                                     */
2829 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2830
2831         DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2832
2833         /* HW attentions */
2834         if (status & 0x1)
2835                 bnx2x_attn_int(bp);
2836
2837         /* CStorm events: query_stats, port delete ramrod */
2838         if (status & 0x2)
2839                 bp->stats_pending = 0;
2840
2841         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2842                      IGU_INT_NOP, 1);
2843         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2844                      IGU_INT_NOP, 1);
2845         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2846                      IGU_INT_NOP, 1);
2847         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2848                      IGU_INT_NOP, 1);
2849         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2850                      IGU_INT_ENABLE, 1);
2852 }
2853
2854 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2855 {
2856         struct net_device *dev = dev_instance;
2857         struct bnx2x *bp = netdev_priv(dev);
2858
2859         /* Return here if interrupt is disabled */
2860         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2861                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2862                 return IRQ_HANDLED;
2863         }
2864
2865         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2866
2867 #ifdef BNX2X_STOP_ON_ERROR
2868         if (unlikely(bp->panic))
2869                 return IRQ_HANDLED;
2870 #endif
2871
2872         schedule_work(&bp->sp_task);
2873
2874         return IRQ_HANDLED;
2875 }
2876
2877 /* end of slow path */
2878
2879 /* Statistics */
2880
2881 /****************************************************************************
2882 * Macros
2883 ****************************************************************************/
2884
2885 /* sum[hi:lo] += add[hi:lo] */
2886 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2887         do { \
2888                 s_lo += a_lo; \
2889                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2890         } while (0)
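/* ADD_64 worked example (illustrative values): adding a_hi:a_lo = 0x0:0x1
 * to s_hi:s_lo = 0x0:0xffffffff wraps s_lo to 0; the wrap makes
 * (s_lo < a_lo) true, so the carry propagates and the result is
 * 0x1:0x00000000
 */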
2891
2892 /* difference = minuend - subtrahend */
2893 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2894         do { \
2895                 if (m_lo < s_lo) { \
2896                         /* underflow */ \
2897                         d_hi = m_hi - s_hi; \
2898                         if (d_hi > 0) { \
2899                         /* we can borrow 1 from the high word */ \
2900                                 d_hi--; \
2901                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2902                         } else { \
2903                         /* m_hi <= s_hi */ \
2904                                 d_hi = 0; \
2905                                 d_lo = 0; \
2906                         } \
2907                 } else { \
2908                         /* m_lo >= s_lo */ \
2909                         if (m_hi < s_hi) { \
2910                                 d_hi = 0; \
2911                                 d_lo = 0; \
2912                         } else { \
2913                         /* m_hi >= s_hi */ \
2914                                 d_hi = m_hi - s_hi; \
2915                                 d_lo = m_lo - s_lo; \
2916                         } \
2917                 } \
2918         } while (0)
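/* DIFF_64 worked example (illustrative values): minuend 0x2:0x5 minus
 * subtrahend 0x1:0xa takes the m_lo < s_lo branch; one unit is borrowed
 * from the high words (d_hi = 2 - 1 - 1 = 0) and the low result is
 * d_lo = 5 + (UINT_MAX - 0xa) + 1 = 0xfffffffb, i.e. 0x0:0xfffffffb.
 * A subtrahend larger than the minuend clamps the result to 0:0 instead
 * of underflowing.
 */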
2919
2920 #define UPDATE_STAT64(s, t) \
2921         do { \
2922                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2923                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2924                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2925                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2926                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2927                        pstats->mac_stx[1].t##_lo, diff.lo); \
2928         } while (0)
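/* In UPDATE_STAT64, mac_stx[0] caches the raw 64-bit value read from the
 * MAC on the previous pass and mac_stx[1] holds the accumulated total;
 * since the delta goes through DIFF_64, a counter that went backwards
 * (e.g. across a MAC reset) contributes 0 rather than a huge bogus
 * increment
 */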
2929
2930 #define UPDATE_STAT64_NIG(s, t) \
2931         do { \
2932                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2933                         diff.lo, new->s##_lo, old->s##_lo); \
2934                 ADD_64(estats->t##_hi, diff.hi, \
2935                        estats->t##_lo, diff.lo); \
2936         } while (0)
2937
2938 /* sum[hi:lo] += add */
2939 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2940         do { \
2941                 s_lo += a; \
2942                 s_hi += (s_lo < a) ? 1 : 0; \
2943         } while (0)
2944
2945 #define UPDATE_EXTEND_STAT(s) \
2946         do { \
2947                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2948                               pstats->mac_stx[1].s##_lo, \
2949                               new->s); \
2950         } while (0)
2951
2952 #define UPDATE_EXTEND_TSTAT(s, t) \
2953         do { \
2954                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2955                 old_tclient->s = le32_to_cpu(tclient->s); \
2956                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2957         } while (0)
2958
2959 #define UPDATE_EXTEND_XSTAT(s, t) \
2960         do { \
2961                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2962                 old_xclient->s = le32_to_cpu(xclient->s); \
2963                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2964         } while (0)
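/* the u32 subtraction in UPDATE_EXTEND_TSTAT/XSTAT is wrap-safe:
 * (new - old) modulo 2^32 is still the true delta even if the 32-bit
 * storm counter wrapped, provided fewer than 2^32 events occurred
 * between two updates
 */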
2965
2966 /*
2967  * General service functions
2968  */
2969
2970 static inline long bnx2x_hilo(u32 *hiref)
2971 {
2972         u32 lo = *(hiref + 1);
2973 #if (BITS_PER_LONG == 64)
2974         u32 hi = *hiref;
2975
2976         return HILO_U64(hi, lo);
2977 #else
2978         return lo;
2979 #endif
2980 }
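/* e.g. for hiref[0] = 0x1 and hiref[1] = 0x2, bnx2x_hilo() returns
 * 0x100000002 on a 64-bit kernel but only the low word (2) when long
 * is 32 bits wide
 */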
2981
2982 /*
2983  * Init service functions
2984  */
2985
2986 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2987 {
2988         if (!bp->stats_pending) {
2989                 struct eth_query_ramrod_data ramrod_data = {0};
2990                 int rc;
2991
2992                 ramrod_data.drv_counter = bp->stats_counter++;
2993                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2994                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2995
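                /* the 8-byte ramrod_data is handed to the FW as two
                 * separate 32-bit halves ([1] then [0]) to match
                 * bnx2x_sp_post()'s data_hi/data_lo arguments
                 */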
2996                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2997                                    ((u32 *)&ramrod_data)[1],
2998                                    ((u32 *)&ramrod_data)[0], 0);
2999                 if (rc == 0) {
3000                         /* stats ramrod has its own slot on the spq */
3001                         bp->spq_left++;
3002                         bp->stats_pending = 1;
3003                 }
3004         }
3005 }
3006
3007 static void bnx2x_stats_init(struct bnx2x *bp)
3008 {
3009         int port = BP_PORT(bp);
3010
3011         bp->executer_idx = 0;
3012         bp->stats_counter = 0;
3013
3014         /* port stats */
3015         if (!BP_NOMCP(bp))
3016                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3017         else
3018                 bp->port.port_stx = 0;
3019         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3020
3021         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3022         bp->port.old_nig_stats.brb_discard =
3023                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3024         bp->port.old_nig_stats.brb_truncate =
3025                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3026         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3027                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3028         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3029                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3030
3031         /* function stats */
3032         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3033         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3034         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3035         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3036
3037         bp->stats_state = STATS_STATE_DISABLED;
3038         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3039                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3040 }
3041
3042 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3043 {
3044         struct dmae_command *dmae = &bp->stats_dmae;
3045         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3046
3047         *stats_comp = DMAE_COMP_VAL;
3048
3049         /* loader */
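        /* the loader is itself a DMAE command: it copies the first
         * prepared command from host memory into HW command slot
         * loader_idx + 1, and its completion write to that slot's GO
         * register (comp_addr/comp_val below) launches the copied
         * command
         */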
3050         if (bp->executer_idx) {
3051                 int loader_idx = PMF_DMAE_C(bp);
3052
3053                 memset(dmae, 0, sizeof(struct dmae_command));
3054
3055                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3056                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3057                                 DMAE_CMD_DST_RESET |
3058 #ifdef __BIG_ENDIAN
3059                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3060 #else
3061                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3062 #endif
3063                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3064                                                DMAE_CMD_PORT_0) |
3065                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3066                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3067                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3068                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3069                                      sizeof(struct dmae_command) *
3070                                      (loader_idx + 1)) >> 2;
3071                 dmae->dst_addr_hi = 0;
3072                 dmae->len = sizeof(struct dmae_command) >> 2;
3073                 if (CHIP_IS_E1(bp))
3074                         dmae->len--;
3075                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3076                 dmae->comp_addr_hi = 0;
3077                 dmae->comp_val = 1;
3078
3079                 *stats_comp = 0;
3080                 bnx2x_post_dmae(bp, dmae, loader_idx);
3081
3082         } else if (bp->func_stx) {
3083                 *stats_comp = 0;
3084                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3085         }
3086 }
3087
3088 static int bnx2x_stats_comp(struct bnx2x *bp)
3089 {
3090         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091         int cnt = 10;
3092
3093         might_sleep();
3094         while (*stats_comp != DMAE_COMP_VAL) {
3095                 if (!cnt) {
3096                         BNX2X_ERR("timeout waiting for stats to finish\n");
3097                         break;
3098                 }
3099                 cnt--;
3100                 msleep(1);
3101         }
3102         return 1;
3103 }
3104
3105 /*
3106  * Statistics service functions
3107  */
3108
3109 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3110 {
3111         struct dmae_command *dmae;
3112         u32 opcode;
3113         int loader_idx = PMF_DMAE_C(bp);
3114         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3115
3116         /* sanity */
3117         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3118                 BNX2X_ERR("BUG!\n");
3119                 return;
3120         }
3121
3122         bp->executer_idx = 0;
3123
3124         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3125                   DMAE_CMD_C_ENABLE |
3126                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3127 #ifdef __BIG_ENDIAN
3128                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3129 #else
3130                   DMAE_CMD_ENDIANITY_DW_SWAP |
3131 #endif
3132                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3133                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3134
3135         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3136         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3137         dmae->src_addr_lo = bp->port.port_stx >> 2;
3138         dmae->src_addr_hi = 0;
3139         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3140         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3141         dmae->len = DMAE_LEN32_RD_MAX;
3142         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3143         dmae->comp_addr_hi = 0;
3144         dmae->comp_val = 1;
3145
3146         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3147         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3148         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3149         dmae->src_addr_hi = 0;
3150         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3151                                    DMAE_LEN32_RD_MAX * 4);
3152         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3153                                    DMAE_LEN32_RD_MAX * 4);
3154         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3155         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3156         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3157         dmae->comp_val = DMAE_COMP_VAL;
3158
3159         *stats_comp = 0;
3160         bnx2x_hw_stats_post(bp);
3161         bnx2x_stats_comp(bp);
3162 }
3163
3164 static void bnx2x_port_stats_init(struct bnx2x *bp)
3165 {
3166         struct dmae_command *dmae;
3167         int port = BP_PORT(bp);
3168         int vn = BP_E1HVN(bp);
3169         u32 opcode;
3170         int loader_idx = PMF_DMAE_C(bp);
3171         u32 mac_addr;
3172         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3173
3174         /* sanity */
3175         if (!bp->link_vars.link_up || !bp->port.pmf) {
3176                 BNX2X_ERR("BUG!\n");
3177                 return;
3178         }
3179
3180         bp->executer_idx = 0;
3181
3182         /* MCP */
3183         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3184                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3185                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3186 #ifdef __BIG_ENDIAN
3187                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3188 #else
3189                   DMAE_CMD_ENDIANITY_DW_SWAP |
3190 #endif
3191                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3192                   (vn << DMAE_CMD_E1HVN_SHIFT));
3193
3194         if (bp->port.port_stx) {
3195
3196                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3197                 dmae->opcode = opcode;
3198                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3199                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3200                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3201                 dmae->dst_addr_hi = 0;
3202                 dmae->len = sizeof(struct host_port_stats) >> 2;
3203                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3204                 dmae->comp_addr_hi = 0;
3205                 dmae->comp_val = 1;
3206         }
3207
3208         if (bp->func_stx) {
3209
3210                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3211                 dmae->opcode = opcode;
3212                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3213                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3214                 dmae->dst_addr_lo = bp->func_stx >> 2;
3215                 dmae->dst_addr_hi = 0;
3216                 dmae->len = sizeof(struct host_func_stats) >> 2;
3217                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3218                 dmae->comp_addr_hi = 0;
3219                 dmae->comp_val = 1;
3220         }
3221
3222         /* MAC */
3223         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3224                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3225                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3226 #ifdef __BIG_ENDIAN
3227                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3228 #else
3229                   DMAE_CMD_ENDIANITY_DW_SWAP |
3230 #endif
3231                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3232                   (vn << DMAE_CMD_E1HVN_SHIFT));
3233
3234         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3235
3236                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3237                                    NIG_REG_INGRESS_BMAC0_MEM);
3238
3239                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3240                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3241                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242                 dmae->opcode = opcode;
3243                 dmae->src_addr_lo = (mac_addr +
3244                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3245                 dmae->src_addr_hi = 0;
3246                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3247                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3248                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3249                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3250                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3251                 dmae->comp_addr_hi = 0;
3252                 dmae->comp_val = 1;
3253
3254                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3255                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3256                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257                 dmae->opcode = opcode;
3258                 dmae->src_addr_lo = (mac_addr +
3259                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3260                 dmae->src_addr_hi = 0;
3261                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3262                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3263                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3264                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3265                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3266                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3268                 dmae->comp_addr_hi = 0;
3269                 dmae->comp_val = 1;
3270
3271         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3272
3273                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3274
3275                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
3276                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3277                 dmae->opcode = opcode;
3278                 dmae->src_addr_lo = (mac_addr +
3279                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3280                 dmae->src_addr_hi = 0;
3281                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3282                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3283                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3284                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285                 dmae->comp_addr_hi = 0;
3286                 dmae->comp_val = 1;
3287
3288                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3289                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3290                 dmae->opcode = opcode;
3291                 dmae->src_addr_lo = (mac_addr +
3292                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3293                 dmae->src_addr_hi = 0;
3294                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3295                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3296                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3297                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3298                 dmae->len = 1;
3299                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3300                 dmae->comp_addr_hi = 0;
3301                 dmae->comp_val = 1;
3302
3303                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
3304                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305                 dmae->opcode = opcode;
3306                 dmae->src_addr_lo = (mac_addr +
3307                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3308                 dmae->src_addr_hi = 0;
3309                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3310                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3311                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3312                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3313                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3314                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315                 dmae->comp_addr_hi = 0;
3316                 dmae->comp_val = 1;
3317         }
3318
3319         /* NIG */
3320         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321         dmae->opcode = opcode;
3322         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3323                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3324         dmae->src_addr_hi = 0;
3325         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3326         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3327         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3328         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329         dmae->comp_addr_hi = 0;
3330         dmae->comp_val = 1;
3331
3332         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3333         dmae->opcode = opcode;
3334         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3335                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3336         dmae->src_addr_hi = 0;
3337         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3338                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3339         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3340                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3341         dmae->len = (2*sizeof(u32)) >> 2;
3342         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3343         dmae->comp_addr_hi = 0;
3344         dmae->comp_val = 1;
3345
3346         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3347         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3348                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3349                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3350 #ifdef __BIG_ENDIAN
3351                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3352 #else
3353                         DMAE_CMD_ENDIANITY_DW_SWAP |
3354 #endif
3355                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3356                         (vn << DMAE_CMD_E1HVN_SHIFT));
3357         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3358                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3359         dmae->src_addr_hi = 0;
3360         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3361                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3362         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3363                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3364         dmae->len = (2*sizeof(u32)) >> 2;
3365         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3366         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3367         dmae->comp_val = DMAE_COMP_VAL;
3368
3369         *stats_comp = 0;
3370 }
3371
3372 static void bnx2x_func_stats_init(struct bnx2x *bp)
3373 {
3374         struct dmae_command *dmae = &bp->stats_dmae;
3375         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3376
3377         /* sanity */
3378         if (!bp->func_stx) {
3379                 BNX2X_ERR("BUG!\n");
3380                 return;
3381         }
3382
3383         bp->executer_idx = 0;
3384         memset(dmae, 0, sizeof(struct dmae_command));
3385
3386         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3387                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3388                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3389 #ifdef __BIG_ENDIAN
3390                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3391 #else
3392                         DMAE_CMD_ENDIANITY_DW_SWAP |
3393 #endif
3394                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3395                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3396         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3397         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3398         dmae->dst_addr_lo = bp->func_stx >> 2;
3399         dmae->dst_addr_hi = 0;
3400         dmae->len = sizeof(struct host_func_stats) >> 2;
3401         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3402         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3403         dmae->comp_val = DMAE_COMP_VAL;
3404
3405         *stats_comp = 0;
3406 }
3407
3408 static void bnx2x_stats_start(struct bnx2x *bp)
3409 {
3410         if (bp->port.pmf)
3411                 bnx2x_port_stats_init(bp);
3412
3413         else if (bp->func_stx)
3414                 bnx2x_func_stats_init(bp);
3415
3416         bnx2x_hw_stats_post(bp);
3417         bnx2x_storm_stats_post(bp);
3418 }
3419
3420 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3421 {
3422         bnx2x_stats_comp(bp);
3423         bnx2x_stats_pmf_update(bp);
3424         bnx2x_stats_start(bp);
3425 }
3426
3427 static void bnx2x_stats_restart(struct bnx2x *bp)
3428 {
3429         bnx2x_stats_comp(bp);
3430         bnx2x_stats_start(bp);
3431 }
3432
3433 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3434 {
3435         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3436         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3437         struct regpair diff;
3438
3439         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3440         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3441         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3442         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3443         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3444         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3445         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3446         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3447         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3448         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3449         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3450         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3451         UPDATE_STAT64(tx_stat_gt127,
3452                                 tx_stat_etherstatspkts65octetsto127octets);
3453         UPDATE_STAT64(tx_stat_gt255,
3454                                 tx_stat_etherstatspkts128octetsto255octets);
3455         UPDATE_STAT64(tx_stat_gt511,
3456                                 tx_stat_etherstatspkts256octetsto511octets);
3457         UPDATE_STAT64(tx_stat_gt1023,
3458                                 tx_stat_etherstatspkts512octetsto1023octets);
3459         UPDATE_STAT64(tx_stat_gt1518,
3460                                 tx_stat_etherstatspkts1024octetsto1522octets);
3461         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3462         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3463         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3464         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3465         UPDATE_STAT64(tx_stat_gterr,
3466                                 tx_stat_dot3statsinternalmactransmiterrors);
3467         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3468 }
3469
3470 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3471 {
3472         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3473         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3474
3475         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3476         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3477         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3478         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3479         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3480         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3481         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3482         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3483         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3484         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3485         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3486         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3487         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3488         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3489         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3490         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3491         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3492         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3493         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3494         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3495         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3496         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3497         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3498         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3499         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3500         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3501         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3502         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3503         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3504         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3505         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3506 }
3507
3508 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3509 {
3510         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3511         struct nig_stats *old = &(bp->port.old_nig_stats);
3512         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3513         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3514         struct regpair diff;
3515
3516         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3517                 bnx2x_bmac_stats_update(bp);
3518
3519         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3520                 bnx2x_emac_stats_update(bp);
3521
3522         else { /* unreached */
3523                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3524                 return -1;
3525         }
3526
3527         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3528                       new->brb_discard - old->brb_discard);
3529         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3530                       new->brb_truncate - old->brb_truncate);
3531
3532         UPDATE_STAT64_NIG(egress_mac_pkt0,
3533                                         etherstatspkts1024octetsto1522octets);
3534         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3535
3536         memcpy(old, new, sizeof(struct nig_stats));
3537
3538         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3539                sizeof(struct mac_stx));
3540         estats->brb_drop_hi = pstats->brb_drop_hi;
3541         estats->brb_drop_lo = pstats->brb_drop_lo;
3542
3543         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3544
3545         return 0;
3546 }
3547
3548 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3549 {
3550         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3551         int cl_id = BP_CL_ID(bp);
3552         struct tstorm_per_port_stats *tport =
3553                                 &stats->tstorm_common.port_statistics;
3554         struct tstorm_per_client_stats *tclient =
3555                         &stats->tstorm_common.client_statistics[cl_id];
3556         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3557         struct xstorm_per_client_stats *xclient =
3558                         &stats->xstorm_common.client_statistics[cl_id];
3559         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3560         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3561         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3562         u32 diff;
3563
3564         /* are storm stats valid? */
3565         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3566                                                         bp->stats_counter) {
3567                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3568                    "  tstorm counter (%d) != stats_counter (%d)\n",
3569                    tclient->stats_counter, bp->stats_counter);
3570                 return -1;
3571         }
3572         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3573                                                         bp->stats_counter) {
3574                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3575                    "  xstorm counter (%d) != stats_counter (%d)\n",
3576                    xclient->stats_counter, bp->stats_counter);
3577                 return -2;
3578         }
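        /* e.g. the driver posted query N and bumped stats_counter to
         * N + 1 in bnx2x_storm_stats_post(); a storm that served that
         * query echoes N, so (N + 1 == stats_counter) accepts the
         * snapshot while a stale echo of an older query is rejected
         */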
3579
3580         fstats->total_bytes_received_hi =
3581         fstats->valid_bytes_received_hi =
3582                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3583         fstats->total_bytes_received_lo =
3584         fstats->valid_bytes_received_lo =
3585                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3586
3587         estats->error_bytes_received_hi =
3588                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3589         estats->error_bytes_received_lo =
3590                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3591         ADD_64(estats->error_bytes_received_hi,
3592                estats->rx_stat_ifhcinbadoctets_hi,
3593                estats->error_bytes_received_lo,
3594                estats->rx_stat_ifhcinbadoctets_lo);
3595
3596         ADD_64(fstats->total_bytes_received_hi,
3597                estats->error_bytes_received_hi,
3598                fstats->total_bytes_received_lo,
3599                estats->error_bytes_received_lo);
3600
3601         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3602         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3603                                 total_multicast_packets_received);
3604         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3605                                 total_broadcast_packets_received);
3606
3607         fstats->total_bytes_transmitted_hi =
3608                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3609         fstats->total_bytes_transmitted_lo =
3610                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3611
3612         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3613                                 total_unicast_packets_transmitted);
3614         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3615                                 total_multicast_packets_transmitted);
3616         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3617                                 total_broadcast_packets_transmitted);
3618
3619         memcpy(estats, &(fstats->total_bytes_received_hi),
3620                sizeof(struct host_func_stats) - 2*sizeof(u32));
3621
3622         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3623         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3624         estats->brb_truncate_discard =
3625                                 le32_to_cpu(tport->brb_truncate_discard);
3626         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3627
3628         old_tclient->rcv_unicast_bytes.hi =
3629                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3630         old_tclient->rcv_unicast_bytes.lo =
3631                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3632         old_tclient->rcv_broadcast_bytes.hi =
3633                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3634         old_tclient->rcv_broadcast_bytes.lo =
3635                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3636         old_tclient->rcv_multicast_bytes.hi =
3637                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3638         old_tclient->rcv_multicast_bytes.lo =
3639                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3640         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3641
3642         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3643         old_tclient->packets_too_big_discard =
3644                                 le32_to_cpu(tclient->packets_too_big_discard);
3645         estats->no_buff_discard =
3646         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3647         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3648
3649         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3650         old_xclient->unicast_bytes_sent.hi =
3651                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3652         old_xclient->unicast_bytes_sent.lo =
3653                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3654         old_xclient->multicast_bytes_sent.hi =
3655                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3656         old_xclient->multicast_bytes_sent.lo =
3657                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3658         old_xclient->broadcast_bytes_sent.hi =
3659                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3660         old_xclient->broadcast_bytes_sent.lo =
3661                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3662
3663         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3664
3665         return 0;
3666 }
3667
3668 static void bnx2x_net_stats_update(struct bnx2x *bp)
3669 {
3670         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3671         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3672         struct net_device_stats *nstats = &bp->dev->stats;
3673
3674         nstats->rx_packets =
3675                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3676                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3677                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3678
3679         nstats->tx_packets =
3680                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3681                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3682                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3683
3684         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3685
3686         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3687
3688         nstats->rx_dropped = old_tclient->checksum_discard +
3689                              estats->mac_discard;
3690         nstats->tx_dropped = 0;
3691
3692         nstats->multicast =
3693                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3694
3695         nstats->collisions =
3696                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3697                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3698                         estats->tx_stat_dot3statslatecollisions_lo +
3699                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3700
3701         estats->jabber_packets_received =
3702                                 old_tclient->packets_too_big_discard +
3703                                 estats->rx_stat_dot3statsframestoolong_lo;
3704
3705         nstats->rx_length_errors =
3706                                 estats->rx_stat_etherstatsundersizepkts_lo +
3707                                 estats->jabber_packets_received;
3708         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3709         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3710         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3711         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3712         nstats->rx_missed_errors = estats->xxoverflow_discard;
3713
3714         nstats->rx_errors = nstats->rx_length_errors +
3715                             nstats->rx_over_errors +
3716                             nstats->rx_crc_errors +
3717                             nstats->rx_frame_errors +
3718                             nstats->rx_fifo_errors +
3719                             nstats->rx_missed_errors;
3720
3721         nstats->tx_aborted_errors =
3722                         estats->tx_stat_dot3statslatecollisions_lo +
3723                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3724         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3725         nstats->tx_fifo_errors = 0;
3726         nstats->tx_heartbeat_errors = 0;
3727         nstats->tx_window_errors = 0;
3728
3729         nstats->tx_errors = nstats->tx_aborted_errors +
3730                             nstats->tx_carrier_errors;
3731 }
3732
3733 static void bnx2x_stats_update(struct bnx2x *bp)
3734 {
3735         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3736         int update = 0;
3737
3738         if (*stats_comp != DMAE_COMP_VAL)
3739                 return;
3740
3741         if (bp->port.pmf)
3742                 update = (bnx2x_hw_stats_update(bp) == 0);
3743
3744         update |= (bnx2x_storm_stats_update(bp) == 0);
3745
3746         if (update)
3747                 bnx2x_net_stats_update(bp);
3748
3749         else {
3750                 if (bp->stats_pending) {
3751                         bp->stats_pending++;
3752                         if (bp->stats_pending == 3) {
3753                                 BNX2X_ERR("stats were not updated for 3 consecutive ticks\n");
3754                                 bnx2x_panic();
3755                                 return;
3756                         }
3757                 }
3758         }
3759
3760         if (bp->msglevel & NETIF_MSG_TIMER) {
3761                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3762                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3763                 struct net_device_stats *nstats = &bp->dev->stats;
3764                 int i;
3765
3766                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3767                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3768                                   "  tx pkt (%lx)\n",
3769                        bnx2x_tx_avail(bp->fp),
3770                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3771                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3772                                   "  rx pkt (%lx)\n",
3773                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3774                              bp->fp->rx_comp_cons),
3775                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3776                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3777                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3778                        estats->driver_xoff, estats->brb_drop_lo);
3779                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3780                         "packets_too_big_discard %u  no_buff_discard %u  "
3781                         "mac_discard %u  mac_filter_discard %u  "
3782                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3783                         "ttl0_discard %u\n",
3784                        old_tclient->checksum_discard,
3785                        old_tclient->packets_too_big_discard,
3786                        old_tclient->no_buff_discard, estats->mac_discard,
3787                        estats->mac_filter_discard, estats->xxoverflow_discard,
3788                        estats->brb_truncate_discard,
3789                        old_tclient->ttl0_discard);
3790
3791                 for_each_queue(bp, i) {
3792                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3793                                bnx2x_fp(bp, i, tx_pkt),
3794                                bnx2x_fp(bp, i, rx_pkt),
3795                                bnx2x_fp(bp, i, rx_calls));
3796                 }
3797         }
3798
3799         bnx2x_hw_stats_post(bp);
3800         bnx2x_storm_stats_post(bp);
3801 }
3802
3803 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3804 {
3805         struct dmae_command *dmae;
3806         u32 opcode;
3807         int loader_idx = PMF_DMAE_C(bp);
3808         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3809
3810         bp->executer_idx = 0;
3811
3812         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3813                   DMAE_CMD_C_ENABLE |
3814                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3815 #ifdef __BIG_ENDIAN
3816                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3817 #else
3818                   DMAE_CMD_ENDIANITY_DW_SWAP |
3819 #endif
3820                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3821                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3822
3823         if (bp->port.port_stx) {
3824
3825                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3826                 if (bp->func_stx)
3827                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3828                 else
3829                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3830                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3831                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3832                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3833                 dmae->dst_addr_hi = 0;
3834                 dmae->len = sizeof(struct host_port_stats) >> 2;
3835                 if (bp->func_stx) {
3836                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3837                         dmae->comp_addr_hi = 0;
3838                         dmae->comp_val = 1;
3839                 } else {
3840                         dmae->comp_addr_lo =
3841                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3842                         dmae->comp_addr_hi =
3843                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3844                         dmae->comp_val = DMAE_COMP_VAL;
3845
3846                         *stats_comp = 0;
3847                 }
3848         }
3849
3850         if (bp->func_stx) {
3851
3852                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3853                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3854                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3855                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3856                 dmae->dst_addr_lo = bp->func_stx >> 2;
3857                 dmae->dst_addr_hi = 0;
3858                 dmae->len = sizeof(struct host_func_stats) >> 2;
3859                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3860                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3861                 dmae->comp_val = DMAE_COMP_VAL;
3862
3863                 *stats_comp = 0;
3864         }
3865 }
3866
3867 static void bnx2x_stats_stop(struct bnx2x *bp)
3868 {
3869         int update = 0;
3870
3871         bnx2x_stats_comp(bp);
3872
3873         if (bp->port.pmf)
3874                 update = (bnx2x_hw_stats_update(bp) == 0);
3875
3876         update |= (bnx2x_storm_stats_update(bp) == 0);
3877
3878         if (update) {
3879                 bnx2x_net_stats_update(bp);
3880
3881                 if (bp->port.pmf)
3882                         bnx2x_port_stats_stop(bp);
3883
3884                 bnx2x_hw_stats_post(bp);
3885                 bnx2x_stats_comp(bp);
3886         }
3887 }
3888
3889 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3890 {
3891 }
3892
3893 static const struct {
3894         void (*action)(struct bnx2x *bp);
3895         enum bnx2x_stats_state next_state;
3896 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3897 /* state        event   */
3898 {
3899 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3900 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3901 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3902 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3903 },
3904 {
3905 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3906 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3907 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3908 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3909 }
3910 };
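/* example walk through the state machine: starting DISABLED, a LINK_UP
 * event runs bnx2x_stats_start() and moves to ENABLED; each UPDATE then
 * runs bnx2x_stats_update() and stays in ENABLED; STOP runs
 * bnx2x_stats_stop() and drops back to DISABLED
 */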
3911
3912 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3913 {
3914         enum bnx2x_stats_state state = bp->stats_state;
3915
3916         bnx2x_stats_stm[state][event].action(bp);
3917         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3918
3919         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3920                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3921                    state, event, bp->stats_state);
3922 }
3923
3924 static void bnx2x_timer(unsigned long data)
3925 {
3926         struct bnx2x *bp = (struct bnx2x *) data;
3927
3928         if (!netif_running(bp->dev))
3929                 return;
3930
3931         if (atomic_read(&bp->intr_sem) != 0)
3932                 goto timer_restart;
3933
3934         if (poll) {
3935                 struct bnx2x_fastpath *fp = &bp->fp[0];
3937
3938                 bnx2x_tx_int(fp, 1000);
3939                 bnx2x_rx_int(fp, 1000);
3940         }
3941
3942         if (!BP_NOMCP(bp)) {
3943                 int func = BP_FUNC(bp);
3944                 u32 drv_pulse;
3945                 u32 mcp_pulse;
3946
3947                 ++bp->fw_drv_pulse_wr_seq;
3948                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3949                 /* TBD - add SYSTEM_TIME */
3950                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3951                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3952
3953                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3954                              MCP_PULSE_SEQ_MASK);
3955                 /* The delta between driver pulse and mcp response
3956                  * should be 1 (before mcp response) or 0 (after mcp response)
3957                  */
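                /* e.g. with the driver at sequence N: an MCP echo of
                 * N - 1 (masked) means the response is still pending,
                 * an echo of N means it already arrived; anything else
                 * indicates a lost heartbeat and is logged below
                 */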
3958                 if ((drv_pulse != mcp_pulse) &&
3959                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3960                         /* someone lost a heartbeat... */
3961                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3962                                   drv_pulse, mcp_pulse);
3963                 }
3964         }
3965
3966         if ((bp->state == BNX2X_STATE_OPEN) ||
3967             (bp->state == BNX2X_STATE_DISABLED))
3968                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3969
3970 timer_restart:
3971         mod_timer(&bp->timer, jiffies + bp->current_interval);
3972 }
3973
3974 /* end of Statistics */
3975
3976 /* nic init */
3977
3978 /*
3979  * nic init service functions
3980  */
3981
3982 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3983 {
3984         int port = BP_PORT(bp);
3985
3986         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3987                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3988                         sizeof(struct ustorm_status_block)/4);
3989         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3990                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3991                         sizeof(struct cstorm_status_block)/4);
3992 }
3993
3994 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
3995                           dma_addr_t mapping, int sb_id)
3996 {
3997         int port = BP_PORT(bp);
3998         int func = BP_FUNC(bp);
3999         int index;
4000         u64 section;
4001
4002         /* USTORM */
4003         section = ((u64)mapping) + offsetof(struct host_status_block,
4004                                             u_status_block);
4005         sb->u_status_block.status_block_id = sb_id;
4006
4007         REG_WR(bp, BAR_USTRORM_INTMEM +
4008                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4009         REG_WR(bp, BAR_USTRORM_INTMEM +
4010                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4011                U64_HI(section));
4012         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4013                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4014
4015         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4016                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4017                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4018
4019         /* CSTORM */
4020         section = ((u64)mapping) + offsetof(struct host_status_block,
4021                                             c_status_block);
4022         sb->c_status_block.status_block_id = sb_id;
4023
4024         REG_WR(bp, BAR_CSTRORM_INTMEM +
4025                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4026         REG_WR(bp, BAR_CSTRORM_INTMEM +
4027                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4028                U64_HI(section));
4029         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4030                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4031
4032         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4033                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4034                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4035
4036         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4037 }
4038
4039 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4040 {
4041         int func = BP_FUNC(bp);
4042
4043         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4044                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4045                         sizeof(struct ustorm_def_status_block)/4);
4046         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4047                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4048                         sizeof(struct cstorm_def_status_block)/4);
4049         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4050                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4051                         sizeof(struct xstorm_def_status_block)/4);
4052         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4053                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054                         sizeof(struct tstorm_def_status_block)/4);
4055 }
4056
4057 static void bnx2x_init_def_sb(struct bnx2x *bp,
4058                               struct host_def_status_block *def_sb,
4059                               dma_addr_t mapping, int sb_id)
4060 {
4061         int port = BP_PORT(bp);
4062         int func = BP_FUNC(bp);
4063         int index, val, reg_offset;
4064         u64 section;
4065
4066         /* ATTN */
4067         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4068                                             atten_status_block);
4069         def_sb->atten_status_block.status_block_id = sb_id;
4070
4071         bp->attn_state = 0;
4072
4073         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4074                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4075
4076         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4077                 bp->attn_group[index].sig[0] = REG_RD(bp,
4078                                                      reg_offset + 0x10*index);
4079                 bp->attn_group[index].sig[1] = REG_RD(bp,
4080                                                reg_offset + 0x4 + 0x10*index);
4081                 bp->attn_group[index].sig[2] = REG_RD(bp,
4082                                                reg_offset + 0x8 + 0x10*index);
4083                 bp->attn_group[index].sig[3] = REG_RD(bp,
4084                                                reg_offset + 0xc + 0x10*index);
4085         }
4086
4087         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4088                              HC_REG_ATTN_MSG0_ADDR_L);
4089
4090         REG_WR(bp, reg_offset, U64_LO(section));
4091         REG_WR(bp, reg_offset + 4, U64_HI(section));
4092
4093         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4094
4095         val = REG_RD(bp, reg_offset);
4096         val |= sb_id;
4097         REG_WR(bp, reg_offset, val);
4098
4099         /* USTORM */
4100         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4101                                             u_def_status_block);
4102         def_sb->u_def_status_block.status_block_id = sb_id;
4103
4104         REG_WR(bp, BAR_USTRORM_INTMEM +
4105                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4106         REG_WR(bp, BAR_USTRORM_INTMEM +
4107                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4108                U64_HI(section));
4109         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4110                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4111
4112         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4113                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4114                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4115
4116         /* CSTORM */
4117         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4118                                             c_def_status_block);
4119         def_sb->c_def_status_block.status_block_id = sb_id;
4120
4121         REG_WR(bp, BAR_CSTRORM_INTMEM +
4122                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4123         REG_WR(bp, BAR_CSTRORM_INTMEM +
4124                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4125                U64_HI(section));
4126         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4127                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4128
4129         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4130                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4131                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4132
4133         /* TSTORM */
4134         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135                                             t_def_status_block);
4136         def_sb->t_def_status_block.status_block_id = sb_id;
4137
4138         REG_WR(bp, BAR_TSTRORM_INTMEM +
4139                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140         REG_WR(bp, BAR_TSTRORM_INTMEM +
4141                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142                U64_HI(section));
4143         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4144                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4145
4146         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4147                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4148                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4149
4150         /* XSTORM */
4151         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152                                             x_def_status_block);
4153         def_sb->x_def_status_block.status_block_id = sb_id;
4154
4155         REG_WR(bp, BAR_XSTRORM_INTMEM +
4156                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157         REG_WR(bp, BAR_XSTRORM_INTMEM +
4158                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159                U64_HI(section));
4160         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4161                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4162
4163         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4164                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4165                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4166
4167         bp->stats_pending = 0;
4168         bp->set_mac_pending = 0;
4169
4170         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4171 }
4172
4173 static void bnx2x_update_coalesce(struct bnx2x *bp)
4174 {
4175         int port = BP_PORT(bp);
4176         int i;
4177
4178         for_each_queue(bp, i) {
4179                 int sb_id = bp->fp[i].sb_id;
4180
4181                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4182                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4183                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4184                                                     U_SB_ETH_RX_CQ_INDEX),
4185                         bp->rx_ticks/12);
4186                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4187                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4188                                                      U_SB_ETH_RX_CQ_INDEX),
4189                          bp->rx_ticks ? 0 : 1);
4190                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4191                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4192                                                      U_SB_ETH_RX_BD_INDEX),
4193                          bp->rx_ticks ? 0 : 1);
4194
4195                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4196                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4197                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4198                                                     C_SB_ETH_TX_CQ_INDEX),
4199                         bp->tx_ticks/12);
4200                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4201                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4202                                                      C_SB_ETH_TX_CQ_INDEX),
4203                          bp->tx_ticks ? 0 : 1);
4204         }
4205 }
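
/* Illustrative note on the scaling above: the divide by 12 suggests the
 * HC timeout fields count in 12us units (an assumption drawn from this
 * driver's usage, not from documented register semantics).  E.g. with
 * bp->rx_ticks == 48 the timeout programmed is 48/12 == 4, while a tick
 * value of 0 leaves the timeout unused and sets the HC_DISABLE flag for
 * that index instead.
 */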
4206
4207 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4208                                        struct bnx2x_fastpath *fp, int last)
4209 {
4210         int i;
4211
4212         for (i = 0; i < last; i++) {
4213                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4214                 struct sk_buff *skb = rx_buf->skb;
4215
4216                 if (skb == NULL) {
4217                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4218                         continue;
4219                 }
4220
4221                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4222                         pci_unmap_single(bp->pdev,
4223                                          pci_unmap_addr(rx_buf, mapping),
4224                                          bp->rx_buf_use_size,
4225                                          PCI_DMA_FROMDEVICE);
4226
4227                 dev_kfree_skb(skb);
4228                 rx_buf->skb = NULL;
4229         }
4230 }
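
/* Note on the loop above: only bins still in BNX2X_TPA_START hold a
 * live DMA mapping (their skb was handed to the hardware and the
 * aggregation never completed), so only those are unmapped before the
 * skb is freed; stopped bins either were never mapped or were already
 * unmapped on completion.
 */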
4231
4232 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4233 {
4234         int func = BP_FUNC(bp);
4235         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4236                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4237         u16 ring_prod, cqe_ring_prod;
4238         int i, j;
4239
4240         bp->rx_buf_use_size = bp->dev->mtu;
4241         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4242         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4243
4244         if (bp->flags & TPA_ENABLE_FLAG) {
4245                 DP(NETIF_MSG_IFUP,
4246                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4247                    bp->rx_buf_use_size, bp->rx_buf_size,
4248                    bp->dev->mtu + ETH_OVREHEAD);
4249
4250                 for_each_queue(bp, j) {
4251                         struct bnx2x_fastpath *fp = &bp->fp[j];
4252
4253                         for (i = 0; i < max_agg_queues; i++) {
4254                                 fp->tpa_pool[i].skb =
4255                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4256                                 if (!fp->tpa_pool[i].skb) {
4257                                         BNX2X_ERR("Failed to allocate TPA "
4258                                                   "skb pool for queue[%d] - "
4259                                                   "disabling TPA on this "
4260                                                   "queue!\n", j);
4261                                         bnx2x_free_tpa_pool(bp, fp, i);
4262                                         fp->disable_tpa = 1;
4263                                         break;
4264                                 }
4265                                 /* fp, not bp->fp: each queue owns its pool */
4266                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4267                                                    mapping, 0);
4268                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4269                         }
4270                 }
4271         }
4272
4273         for_each_queue(bp, j) {
4274                 struct bnx2x_fastpath *fp = &bp->fp[j];
4275
4276                 fp->rx_bd_cons = 0;
4277                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4278                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4279
4280                 /* "next page" elements initialization */
4281                 /* SGE ring */
4282                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4283                         struct eth_rx_sge *sge;
4284
4285                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4286                         sge->addr_hi =
4287                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4288                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4289                         sge->addr_lo =
4290                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4291                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4292                 }
4293
4294                 bnx2x_init_sge_ring_bit_mask(fp);
4295
4296                 /* RX BD ring */
4297                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4298                         struct eth_rx_bd *rx_bd;
4299
4300                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4301                         rx_bd->addr_hi =
4302                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4303                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4304                         rx_bd->addr_lo =
4305                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4306                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4307                 }
4308
4309                 /* CQ ring */
4310                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4311                         struct eth_rx_cqe_next_page *nextpg;
4312
4313                         nextpg = (struct eth_rx_cqe_next_page *)
4314                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4315                         nextpg->addr_hi =
4316                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4317                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4318                         nextpg->addr_lo =
4319                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4320                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4321                 }
4322
4323                 /* Allocate SGEs and initialize the ring elements */
4324                 for (i = 0, ring_prod = 0;
4325                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4326
4327                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4328                                 BNX2X_ERR("was only able to allocate "
4329                                           "%d rx sges\n", i);
4330                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4331                                 /* Cleanup already allocated elements */
4332                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4333                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4334                                 fp->disable_tpa = 1;
4335                                 ring_prod = 0;
4336                                 break;
4337                         }
4338                         ring_prod = NEXT_SGE_IDX(ring_prod);
4339                 }
4340                 fp->rx_sge_prod = ring_prod;
4341
4342                 /* Allocate BDs and initialize BD ring */
4343                 fp->rx_comp_cons = 0;
4344                 cqe_ring_prod = ring_prod = 0;
4345                 for (i = 0; i < bp->rx_ring_size; i++) {
4346                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4347                                 BNX2X_ERR("was only able to allocate "
4348                                           "%d rx skbs\n", i);
4349                                 bp->eth_stats.rx_skb_alloc_failed++;
4350                                 break;
4351                         }
4352                         ring_prod = NEXT_RX_IDX(ring_prod);
4353                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4354                         WARN_ON(ring_prod <= i);
4355                 }
4356
4357                 fp->rx_bd_prod = ring_prod;
4358                 /* must not have more available CQEs than BDs */
4359                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4360                                        cqe_ring_prod);
4361                 fp->rx_pkt = fp->rx_calls = 0;
4362
4363                 /* Warning!
4364                  * This will generate an interrupt (to the TSTORM),
4365                  * so it must only be done after the chip is initialized.
4366                  */
4367                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4368                                      fp->rx_sge_prod);
4369                 if (j != 0)
4370                         continue;
4371
4372                 REG_WR(bp, BAR_USTRORM_INTMEM +
4373                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4374                        U64_LO(fp->rx_comp_mapping));
4375                 REG_WR(bp, BAR_USTRORM_INTMEM +
4376                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4377                        U64_HI(fp->rx_comp_mapping));
4378         }
4379 }
4380
4381 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4382 {
4383         int i, j;
4384
4385         for_each_queue(bp, j) {
4386                 struct bnx2x_fastpath *fp = &bp->fp[j];
4387
4388                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4389                         struct eth_tx_bd *tx_bd =
4390                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4391
4392                         tx_bd->addr_hi =
4393                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4394                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4395                         tx_bd->addr_lo =
4396                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4397                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4398                 }
4399
4400                 fp->tx_pkt_prod = 0;
4401                 fp->tx_pkt_cons = 0;
4402                 fp->tx_bd_prod = 0;
4403                 fp->tx_bd_cons = 0;
4404                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4405                 fp->tx_pkt = 0;
4406         }
4407 }
4408
4409 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4410 {
4411         int func = BP_FUNC(bp);
4412
4413         spin_lock_init(&bp->spq_lock);
4414
4415         bp->spq_left = MAX_SPQ_PENDING;
4416         bp->spq_prod_idx = 0;
4417         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4418         bp->spq_prod_bd = bp->spq;
4419         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4420
4421         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4422                U64_LO(bp->spq_mapping));
4423         REG_WR(bp,
4424                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4425                U64_HI(bp->spq_mapping));
4426
4427         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4428                bp->spq_prod_idx);
4429 }
4430
4431 static void bnx2x_init_context(struct bnx2x *bp)
4432 {
4433         int i;
4434
4435         for_each_queue(bp, i) {
4436                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4437                 struct bnx2x_fastpath *fp = &bp->fp[i];
4438                 u8 sb_id = FP_SB_ID(fp);
4439
4440                 context->xstorm_st_context.tx_bd_page_base_hi =
4441                                                 U64_HI(fp->tx_desc_mapping);
4442                 context->xstorm_st_context.tx_bd_page_base_lo =
4443                                                 U64_LO(fp->tx_desc_mapping);
4444                 context->xstorm_st_context.db_data_addr_hi =
4445                                                 U64_HI(fp->tx_prods_mapping);
4446                 context->xstorm_st_context.db_data_addr_lo =
4447                                                 U64_LO(fp->tx_prods_mapping);
4448                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4449                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4450
4451                 context->ustorm_st_context.common.sb_index_numbers =
4452                                                 BNX2X_RX_SB_INDEX_NUM;
4453                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4454                 context->ustorm_st_context.common.status_block_id = sb_id;
4455                 context->ustorm_st_context.common.flags =
4456                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4457                 context->ustorm_st_context.common.mc_alignment_size = 64;
4458                 context->ustorm_st_context.common.bd_buff_size =
4459                                                 bp->rx_buf_use_size;
4460                 context->ustorm_st_context.common.bd_page_base_hi =
4461                                                 U64_HI(fp->rx_desc_mapping);
4462                 context->ustorm_st_context.common.bd_page_base_lo =
4463                                                 U64_LO(fp->rx_desc_mapping);
4464                 if (!fp->disable_tpa) {
4465                         context->ustorm_st_context.common.flags |=
4466                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4467                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4468                         context->ustorm_st_context.common.sge_buff_size =
4469                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4470                         context->ustorm_st_context.common.sge_page_base_hi =
4471                                                 U64_HI(fp->rx_sge_mapping);
4472                         context->ustorm_st_context.common.sge_page_base_lo =
4473                                                 U64_LO(fp->rx_sge_mapping);
4474                 }
4475
4476                 context->cstorm_st_context.sb_index_number =
4477                                                 C_SB_ETH_TX_CQ_INDEX;
4478                 context->cstorm_st_context.status_block_id = sb_id;
4479
4480                 context->xstorm_ag_context.cdu_reserved =
4481                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4482                                                CDU_REGION_NUMBER_XCM_AG,
4483                                                ETH_CONNECTION_TYPE);
4484                 context->ustorm_ag_context.cdu_usage =
4485                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4486                                                CDU_REGION_NUMBER_UCM_AG,
4487                                                ETH_CONNECTION_TYPE);
4488         }
4489 }
4490
4491 static void bnx2x_init_ind_table(struct bnx2x *bp)
4492 {
4493         int port = BP_PORT(bp);
4494         int i;
4495
4496         if (!is_multi(bp))
4497                 return;
4498
4499         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4500         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4501                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4502                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4503                         i % bp->num_queues);
4504
4505         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4506 }
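
/* Worked example of the fill above: with bp->num_queues == 4 the
 * indirection table entries become 0, 1, 2, 3, 0, 1, 2, 3, ... for all
 * TSTORM_INDIRECTION_TABLE_SIZE slots, spreading RSS hash results
 * round-robin across the queues.
 */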
4507
4508 static void bnx2x_set_client_config(struct bnx2x *bp)
4509 {
4510         struct tstorm_eth_client_config tstorm_client = {0};
4511         int port = BP_PORT(bp);
4512         int i;
4513
4514         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4515         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4516         tstorm_client.config_flags =
4517                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4518 #ifdef BCM_VLAN
4519         if (bp->rx_mode && bp->vlgrp) {
4520                 tstorm_client.config_flags |=
4521                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4522                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4523         }
4524 #endif
4525
4526         if (bp->flags & TPA_ENABLE_FLAG) {
4527                 tstorm_client.max_sges_for_packet =
4528                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4529                 tstorm_client.max_sges_for_packet =
4530                         ((tstorm_client.max_sges_for_packet +
4531                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4532                         PAGES_PER_SGE_SHIFT;
4533
4534                 tstorm_client.config_flags |=
4535                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4536         }
4537
4538         for_each_queue(bp, i) {
4539                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4540                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4541                        ((u32 *)&tstorm_client)[0]);
4542                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4543                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4544                        ((u32 *)&tstorm_client)[1]);
4545         }
4546
4547         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4548            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4549 }
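
/* Worked example of the SGE sizing above (illustrative; assumes 4K
 * pages and, purely for the example, PAGES_PER_SGE == 2 with
 * PAGES_PER_SGE_SHIFT == 1): for tstorm_client.mtu of roughly 9018,
 * BCM_PAGE_ALIGN() >> BCM_PAGE_SHIFT gives 3 pages, rounding up to a
 * multiple of PAGES_PER_SGE gives 4, and the final shift yields
 * max_sges_for_packet == 2.
 */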
4550
4551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4552 {
4553         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4554         int mode = bp->rx_mode;
4555         int mask = (1 << BP_L_ID(bp));
4556         int func = BP_FUNC(bp);
4557         int i;
4558
4559         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4560
4561         switch (mode) {
4562         case BNX2X_RX_MODE_NONE: /* no Rx */
4563                 tstorm_mac_filter.ucast_drop_all = mask;
4564                 tstorm_mac_filter.mcast_drop_all = mask;
4565                 tstorm_mac_filter.bcast_drop_all = mask;
4566                 break;
4567         case BNX2X_RX_MODE_NORMAL:
4568                 tstorm_mac_filter.bcast_accept_all = mask;
4569                 break;
4570         case BNX2X_RX_MODE_ALLMULTI:
4571                 tstorm_mac_filter.mcast_accept_all = mask;
4572                 tstorm_mac_filter.bcast_accept_all = mask;
4573                 break;
4574         case BNX2X_RX_MODE_PROMISC:
4575                 tstorm_mac_filter.ucast_accept_all = mask;
4576                 tstorm_mac_filter.mcast_accept_all = mask;
4577                 tstorm_mac_filter.bcast_accept_all = mask;
4578                 break;
4579         default:
4580                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4581                 break;
4582         }
4583
4584         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4585                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4586                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4587                        ((u32 *)&tstorm_mac_filter)[i]);
4588
4589 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4590                    ((u32 *)&tstorm_mac_filter)[i]); */
4591         }
4592
4593         if (mode != BNX2X_RX_MODE_NONE)
4594                 bnx2x_set_client_config(bp);
4595 }
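
/* Note on the mask above: each filter field is a per-client bitmask,
 * so with BP_L_ID(bp) == 2 the mask is 0x4 and only bit 2 of each
 * accept/drop field is set, leaving the other clients' filtering
 * untouched.
 */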
4596
4597 static void bnx2x_init_internal_common(struct bnx2x *bp)
4598 {
4599         int i;
4600
4601         /* Zero this manually as its initialization is
4602            currently missing in the initTool */
4603         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4604                 REG_WR(bp, BAR_USTRORM_INTMEM +
4605                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4606 }
4607
4608 static void bnx2x_init_internal_port(struct bnx2x *bp)
4609 {
4610         int port = BP_PORT(bp);
4611
4612         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4613         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4614         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4615         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4616 }
4617
4618 static void bnx2x_init_internal_func(struct bnx2x *bp)
4619 {
4620         struct tstorm_eth_function_common_config tstorm_config = {0};
4621         struct stats_indication_flags stats_flags = {0};
4622         int port = BP_PORT(bp);
4623         int func = BP_FUNC(bp);
4624         int i;
4625         u16 max_agg_size;
4626
4627         if (is_multi(bp)) {
4628                 tstorm_config.config_flags = MULTI_FLAGS;
4629                 tstorm_config.rss_result_mask = MULTI_MASK;
4630         }
4631
4632         tstorm_config.leading_client_id = BP_L_ID(bp);
4633
4634         REG_WR(bp, BAR_TSTRORM_INTMEM +
4635                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4636                (*(u32 *)&tstorm_config));
4637
4638         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4639         bnx2x_set_storm_rx_mode(bp);
4640
4641         /* reset xstorm per client statistics */
4642         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4643                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4644                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4645                        i*4, 0);
4646         }
4647         /* reset tstorm per client statistics */
4648         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4649                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4650                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4651                        i*4, 0);
4652         }
4653
4654         /* Init statistics related context */
4655         stats_flags.collect_eth = 1;
4656
4657         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4658                ((u32 *)&stats_flags)[0]);
4659         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660                ((u32 *)&stats_flags)[1]);
4661
4662         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4663                ((u32 *)&stats_flags)[0]);
4664         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4665                ((u32 *)&stats_flags)[1]);
4666
4667         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4668                ((u32 *)&stats_flags)[0]);
4669         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4670                ((u32 *)&stats_flags)[1]);
4671
4672         REG_WR(bp, BAR_XSTRORM_INTMEM +
4673                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4674                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4675         REG_WR(bp, BAR_XSTRORM_INTMEM +
4676                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4677                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4678
4679         REG_WR(bp, BAR_TSTRORM_INTMEM +
4680                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682         REG_WR(bp, BAR_TSTRORM_INTMEM +
4683                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4685
4686         if (CHIP_IS_E1H(bp)) {
4687                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4688                         IS_E1HMF(bp));
4689                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4690                         IS_E1HMF(bp));
4691                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4692                         IS_E1HMF(bp));
4693                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4694                         IS_E1HMF(bp));
4695
4696                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4697                          bp->e1hov);
4698         }
4699
4700         /* Init CQ ring mapping and aggregation size */
4701         max_agg_size = min((u32)(bp->rx_buf_use_size +
4702                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4703                            (u32)0xffff);
4704         for_each_queue(bp, i) {
4705                 struct bnx2x_fastpath *fp = &bp->fp[i];
4706
4707                 REG_WR(bp, BAR_USTRORM_INTMEM +
4708                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4709                        U64_LO(fp->rx_comp_mapping));
4710                 REG_WR(bp, BAR_USTRORM_INTMEM +
4711                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4712                        U64_HI(fp->rx_comp_mapping));
4713
4714                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4715                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4716                          max_agg_size);
4717         }
4718 }
4719
4720 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4721 {
4722         switch (load_code) {
4723         case FW_MSG_CODE_DRV_LOAD_COMMON:
4724                 bnx2x_init_internal_common(bp);
4725                 /* no break */
4726
4727         case FW_MSG_CODE_DRV_LOAD_PORT:
4728                 bnx2x_init_internal_port(bp);
4729                 /* no break */
4730
4731         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4732                 bnx2x_init_internal_func(bp);
4733                 break;
4734
4735         default:
4736                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4737                 break;
4738         }
4739 }
4740
4741 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4742 {
4743         int i;
4744
4745         for_each_queue(bp, i) {
4746                 struct bnx2x_fastpath *fp = &bp->fp[i];
4747
4748                 fp->bp = bp;
4749                 fp->state = BNX2X_FP_STATE_CLOSED;
4750                 fp->index = i;
4751                 fp->cl_id = BP_L_ID(bp) + i;
4752                 fp->sb_id = fp->cl_id;
4753                 DP(NETIF_MSG_IFUP,
4754                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4755                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4756                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4757                               FP_SB_ID(fp));
4758                 bnx2x_update_fpsb_idx(fp);
4759         }
4760
4761         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4762                           DEF_SB_ID);
4763         bnx2x_update_dsb_idx(bp);
4764         bnx2x_update_coalesce(bp);
4765         bnx2x_init_rx_rings(bp);
4766         bnx2x_init_tx_ring(bp);
4767         bnx2x_init_sp_ring(bp);
4768         bnx2x_init_context(bp);
4769         bnx2x_init_internal(bp, load_code);
4770         bnx2x_init_ind_table(bp);
4771         bnx2x_int_enable(bp);
4772 }
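
/* Note on the ordering in bnx2x_nic_init(): the per-queue and default
 * status blocks are set up first, coalescing parameters are then
 * written into them, the rings come next, and bnx2x_int_enable() runs
 * last - as the warning in bnx2x_init_rx_rings() notes, updating the
 * rx producers already generates traffic towards the chip.
 */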
4773
4774 /* end of nic init */
4775
4776 /*
4777  * gzip service functions
4778  */
4779
4780 static int bnx2x_gunzip_init(struct bnx2x *bp)
4781 {
4782         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4783                                               &bp->gunzip_mapping);
4784         if (bp->gunzip_buf == NULL)
4785                 goto gunzip_nomem1;
4786
4787         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4788         if (bp->strm == NULL)
4789                 goto gunzip_nomem2;
4790
4791         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4792                                       GFP_KERNEL);
4793         if (bp->strm->workspace == NULL)
4794                 goto gunzip_nomem3;
4795
4796         return 0;
4797
4798 gunzip_nomem3:
4799         kfree(bp->strm);
4800         bp->strm = NULL;
4801
4802 gunzip_nomem2:
4803         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4804                             bp->gunzip_mapping);
4805         bp->gunzip_buf = NULL;
4806
4807 gunzip_nomem1:
4808         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4809                " decompression\n", bp->dev->name);
4810         return -ENOMEM;
4811 }
4812
4813 static void bnx2x_gunzip_end(struct bnx2x *bp)
4814 {
4815         kfree(bp->strm->workspace);
4816
4817         kfree(bp->strm);
4818         bp->strm = NULL;
4819
4820         if (bp->gunzip_buf) {
4821                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4822                                     bp->gunzip_mapping);
4823                 bp->gunzip_buf = NULL;
4824         }
4825 }
4826
4827 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4828 {
4829         int n, rc;
4830
4831         /* check gzip header */
4832         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4833                 return -EINVAL;
4834
4835         n = 10;
4836
4837 #define FNAME                           0x8
4838
4839         if (zbuf[3] & FNAME)
4840                 while ((n < len) && (zbuf[n++] != 0));
4841
4842         bp->strm->next_in = zbuf + n;
4843         bp->strm->avail_in = len - n;
4844         bp->strm->next_out = bp->gunzip_buf;
4845         bp->strm->avail_out = FW_BUF_SIZE;
4846
4847         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4848         if (rc != Z_OK)
4849                 return rc;
4850
4851         rc = zlib_inflate(bp->strm, Z_FINISH);
4852         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4853                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4854                        bp->dev->name, bp->strm->msg);
4855
4856         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4857         if (bp->gunzip_outlen & 0x3)
4858                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4859                                     " gunzip_outlen (%d) not aligned\n",
4860                        bp->dev->name, bp->gunzip_outlen);
4861         bp->gunzip_outlen >>= 2;
4862
4863         zlib_inflateEnd(bp->strm);
4864
4865         if (rc == Z_STREAM_END)
4866                 return 0;
4867
4868         return rc;
4869 }
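
/* For reference, a minimal user-space analogue of the sequence above
 * (an illustrative sketch using the standard zlib API rather than the
 * kernel's zlib_* wrappers; the negative window-bits argument selects
 * raw deflate, matching the manual gzip-header skip done here):
 */
#if 0	/* example only - not part of the driver build */
#include <zlib.h>
#include <string.h>

static int gunzip_example(const unsigned char *zbuf, int len,
			  unsigned char *out, int out_len)
{
	z_stream strm;
	int n = 10, rc;			/* fixed gzip header is 10 bytes */

	if ((len < 10) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b))
		return -1;
	if (zbuf[3] & 0x8)		/* FNAME: skip NUL-terminated name */
		while ((n < len) && (zbuf[n++] != 0))
			;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = (unsigned char *)zbuf + n;
	strm.avail_in = len - n;
	strm.next_out = out;
	strm.avail_out = out_len;

	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)	/* raw deflate */
		return -1;
	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);

	return (rc == Z_STREAM_END) ? (int)(out_len - strm.avail_out) : -1;
}
#endif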
4870
4871 /* nic load/unload */
4872
4873 /*
4874  * General service functions
4875  */
4876
4877 /* send a NIG loopback debug packet */
4878 static void bnx2x_lb_pckt(struct bnx2x *bp)
4879 {
4880         u32 wb_write[3];
4881
4882         /* Ethernet source and destination addresses */
4883         wb_write[0] = 0x55555555;
4884         wb_write[1] = 0x55555555;
4885         wb_write[2] = 0x20;             /* SOP */
4886         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4887
4888         /* NON-IP protocol */
4889         wb_write[0] = 0x09000000;
4890         wb_write[1] = 0x55555555;
4891         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4892         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4893 }
4894
4895 /* Some of the internal memories are not directly readable
4896  * from the driver.
4897  * To test them we send debug packets and check hardware counters.
4898  */
4899 static int bnx2x_int_mem_test(struct bnx2x *bp)
4900 {
4901         int factor;
4902         int count, i;
4903         u32 val = 0;
4904
4905         if (CHIP_REV_IS_FPGA(bp))
4906                 factor = 120;
4907         else if (CHIP_REV_IS_EMUL(bp))
4908                 factor = 200;
4909         else
4910                 factor = 1;
4911
4912         DP(NETIF_MSG_HW, "start part1\n");
4913
4914         /* Disable inputs of parser neighbor blocks */
4915         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4916         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4917         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4918         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4919
4920         /*  Write 0 to parser credits for CFC search request */
4921         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4922
4923         /* send Ethernet packet */
4924         bnx2x_lb_pckt(bp);
4925
4926         /* TODO: do we need to reset the NIG statistics? */
4927         /* Wait until NIG register shows 1 packet of size 0x10 */
4928         count = 1000 * factor;
4929         while (count) {
4930
4931                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4932                 val = *bnx2x_sp(bp, wb_data[0]);
4933                 if (val == 0x10)
4934                         break;
4935
4936                 msleep(10);
4937                 count--;
4938         }
4939         if (val != 0x10) {
4940                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4941                 return -1;
4942         }
4943
4944         /* Wait until PRS register shows 1 packet */
4945         count = 1000 * factor;
4946         while (count) {
4947                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4948                 if (val == 1)
4949                         break;
4950
4951                 msleep(10);
4952                 count--;
4953         }
4954         if (val != 0x1) {
4955                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4956                 return -2;
4957         }
4958
4959         /* Reset and init BRB, PRS */
4960         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4961         msleep(50);
4962         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4963         msleep(50);
4964         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4965         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4966
4967         DP(NETIF_MSG_HW, "part2\n");
4968
4969         /* Disable inputs of parser neighbor blocks */
4970         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4971         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4972         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4973         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4974
4975         /* Write 0 to parser credits for CFC search request */
4976         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4977
4978         /* send 10 Ethernet packets */
4979         for (i = 0; i < 10; i++)
4980                 bnx2x_lb_pckt(bp);
4981
4982         /* Wait until NIG register shows 10 + 1
4983            packets of size 11*0x10 = 0xb0 */
4984         count = 1000 * factor;
4985         while (count) {
4986
4987                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4988                 val = *bnx2x_sp(bp, wb_data[0]);
4989                 if (val == 0xb0)
4990                         break;
4991
4992                 msleep(10);
4993                 count--;
4994         }
4995         if (val != 0xb0) {
4996                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4997                 return -3;
4998         }
4999
5000         /* The PRS register should now show 2 packets */
5001         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5002         if (val != 2)
5003                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5004
5005         /* Write 1 to parser credits for CFC search request */
5006         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5007
5008         /* Give the parser time to consume the new credit */
5009         msleep(10 * factor);
5010         /* The PRS register should now show 3 packets */
5011         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5012         if (val != 3)
5013                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5014
5015         /* clear NIG EOP FIFO */
5016         for (i = 0; i < 11; i++)
5017                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5018         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5019         if (val != 1) {
5020                 BNX2X_ERR("clear of NIG failed\n");
5021                 return -4;
5022         }
5023
5024         /* Reset and init BRB, PRS, NIG */
5025         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5026         msleep(50);
5027         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5028         msleep(50);
5029         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5030         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5031 #ifndef BCM_ISCSI
5032         /* set NIC mode */
5033         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5034 #endif
5035
5036         /* Enable inputs of parser neighbor blocks */
5037         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5038         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5039         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5040         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5041
5042         DP(NETIF_MSG_HW, "done\n");
5043
5044         return 0; /* OK */
5045 }
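
/* Summary of the self test above: with the parser's neighbour blocks
 * gated off and its CFC search credit at 0, the loopback packets pile
 * up in counters of known value (0x10 octets per packet in the NIG,
 * packet counts in the PRS); restoring a single credit must then
 * release exactly the expected packets.  This exercises internal
 * memories the driver cannot read directly.
 */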
5046
5047 static void enable_blocks_attention(struct bnx2x *bp)
5048 {
5049         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5050         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5051         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5052         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5053         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5054         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5055         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5056         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5057         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5058 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5059 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5060         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5061         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5062         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5063 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5064 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5065         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5066         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5067         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5068         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5069 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5070 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5071         if (CHIP_REV_IS_FPGA(bp))
5072                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5073         else
5074                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5075         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5076         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5077         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5078 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5079 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5080         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5081         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5082 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5083         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5084 }
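
/* Note on the writes above: these are interrupt *mask* registers, so
 * writing 0 unmasks (enables) every attention bit in a block.  The
 * exceptions are PBF, where 0x18 keeps bits 3 and 4 masked, and PXP2,
 * where a chip-rev-dependent subset stays masked.
 */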
5085
5086
5087 static int bnx2x_init_common(struct bnx2x *bp)
5088 {
5089         u32 val, i;
5090
5091         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5092
5093         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5094         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5095
5096         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5097         if (CHIP_IS_E1H(bp))
5098                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5099
5100         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5101         msleep(30);
5102         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5103
5104         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5105         if (CHIP_IS_E1(bp)) {
5106                 /* enable HW interrupt from PXP on USDM overflow
5107                    (bit 16 of INT_MASK_0) */
5108                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5109         }
5110
5111         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5112         bnx2x_init_pxp(bp);
5113
5114 #ifdef __BIG_ENDIAN
5115         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5116         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5117         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5118         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5119         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5120         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5121
5122 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5123         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5124         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5125         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5126         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5127 #endif
5128
5129 #ifndef BCM_ISCSI
5130                 /* set NIC mode */
5131                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5132 #endif
5133
5134         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5135 #ifdef BCM_ISCSI
5136         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5137         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5138         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5139 #endif
5140
5141         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5142                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5143
5144         /* let the HW do its magic ... */
5145         msleep(100);
5146         /* finish PXP init */
5147         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5148         if (val != 1) {
5149                 BNX2X_ERR("PXP2 CFG failed\n");
5150                 return -EBUSY;
5151         }
5152         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5153         if (val != 1) {
5154                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5155                 return -EBUSY;
5156         }
5157
5158         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5159         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5160
5161         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5162
5163         /* clean the DMAE memory */
5164         bp->dmae_ready = 1;
5165         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5166
5167         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5168         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5169         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5170         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5171
5172         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5173         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5174         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5175         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5176
5177         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5178         /* soft reset pulse */
5179         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5180         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5181
5182 #ifdef BCM_ISCSI
5183         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5184 #endif
5185
5186         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5187         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5188         if (!CHIP_REV_IS_SLOW(bp)) {
5189                 /* enable hw interrupt from doorbell Q */
5190                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5191         }
5192
5193         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5194         if (CHIP_REV_IS_SLOW(bp)) {
5195                 /* no-pause fix for emulation and FPGA */
5196                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5197                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5198                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5199                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5200         }
5201
5202         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5203         if (CHIP_IS_E1H(bp))
5204                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5205
5206         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5207         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5208         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5209         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5210
5211         if (CHIP_IS_E1H(bp)) {
5212                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5213                                 STORM_INTMEM_SIZE_E1H/2);
5214                 bnx2x_init_fill(bp,
5215                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5216                                 0, STORM_INTMEM_SIZE_E1H/2);
5217                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5218                                 STORM_INTMEM_SIZE_E1H/2);
5219                 bnx2x_init_fill(bp,
5220                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221                                 0, STORM_INTMEM_SIZE_E1H/2);
5222                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5223                                 STORM_INTMEM_SIZE_E1H/2);
5224                 bnx2x_init_fill(bp,
5225                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226                                 0, STORM_INTMEM_SIZE_E1H/2);
5227                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5228                                 STORM_INTMEM_SIZE_E1H/2);
5229                 bnx2x_init_fill(bp,
5230                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231                                 0, STORM_INTMEM_SIZE_E1H/2);
5232         } else { /* E1 */
5233                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5234                                 STORM_INTMEM_SIZE_E1);
5235                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5236                                 STORM_INTMEM_SIZE_E1);
5237                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5238                                 STORM_INTMEM_SIZE_E1);
5239                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5240                                 STORM_INTMEM_SIZE_E1);
5241         }
5242
5243         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5244         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5245         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5246         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5247
5248         /* sync semi rtc */
5249         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5250                0x80000000);
5251         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5252                0x80000000);
5253
5254         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5255         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5256         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5257
5258         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5259         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5260                 REG_WR(bp, i, 0xc0cac01a);
5261                 /* TODO: replace with something meaningful */
5262         }
5263         if (CHIP_IS_E1H(bp))
5264                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5265         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5266
5267         if (sizeof(union cdu_context) != 1024)
5268                 /* we currently assume that a context is 1024 bytes */
5269                 printk(KERN_ALERT PFX "please adjust the size of"
5270                        " cdu_context (%ld)\n", (long)sizeof(union cdu_context));
5271
5272         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5273         val = (4 << 24) + (0 << 12) + 1024;
5274         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5275         if (CHIP_IS_E1(bp)) {
5276                 /* !!! fix PXP client credit until the Excel update */
5277                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5278                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5279         }
5280
5281         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5282         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5283
5284         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5285         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5286
5287         /* PXPCS COMMON comes here */
5288         /* Reset PCIE errors for debug */
5289         REG_WR(bp, 0x2814, 0xffffffff);
5290         REG_WR(bp, 0x3820, 0xffffffff);
5291
5292         /* EMAC0 COMMON comes here */
5293         /* EMAC1 COMMON comes here */
5294         /* DBU COMMON comes here */
5295         /* DBG COMMON comes here */
5296
5297         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5298         if (CHIP_IS_E1H(bp)) {
5299                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5300                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5301         }
5302
5303         if (CHIP_REV_IS_SLOW(bp))
5304                 msleep(200);
5305
5306         /* finish CFC init */
5307         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5308         if (val != 1) {
5309                 BNX2X_ERR("CFC LL_INIT failed\n");
5310                 return -EBUSY;
5311         }
5312         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5313         if (val != 1) {
5314                 BNX2X_ERR("CFC AC_INIT failed\n");
5315                 return -EBUSY;
5316         }
5317         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5318         if (val != 1) {
5319                 BNX2X_ERR("CFC CAM_INIT failed\n");
5320                 return -EBUSY;
5321         }
5322         REG_WR(bp, CFC_REG_DEBUG0, 0);
5323
5324         /* read NIG statistic
5325            to see if this is our first time up since powerup */
5326         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5327         val = *bnx2x_sp(bp, wb_data[0]);
5328
5329         /* do internal memory self test */
5330         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5331                 BNX2X_ERR("internal mem self test failed\n");
5332                 return -EBUSY;
5333         }
5334
5335         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5336         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5337                 /* Fan failure is indicated by SPIO 5 */
5338                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5339                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5340
5341                 /* set to active low mode */
5342                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5343                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5344                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5345                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5346
5347                 /* enable interrupt to signal the IGU */
5348                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5349                 val |= (1 << MISC_REGISTERS_SPIO_5);
5350                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5351                 break;
5352
5353         default:
5354                 break;
5355         }
5356
5357         /* clear PXP2 attentions */
5358         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5359
5360         enable_blocks_attention(bp);
5361
5362         if (bp->flags & TPA_ENABLE_FLAG) {
5363                 struct tstorm_eth_tpa_exist tmp = {0};
5364
5365                 tmp.tpa_exist = 1;
5366
5367                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5368                        ((u32 *)&tmp)[0]);
5369                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5370                        ((u32 *)&tmp)[1]);
5371         }
5372
5373         return 0;
5374 }
5375
5376 static int bnx2x_init_port(struct bnx2x *bp)
5377 {
5378         int port = BP_PORT(bp);
5379         u32 val;
5380
5381         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5382
5383         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5384
5385         /* Port PXP comes here */
5386         /* Port PXP2 comes here */
5387 #ifdef BCM_ISCSI
5388         /* Port0  1
5389          * Port1  385 */
5390         i++;
5391         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5392         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5393         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5394         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5395
5396         /* Port0  2
5397          * Port1  386 */
5398         i++;
5399         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5400         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5401         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5402         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5403
5404         /* Port0  3
5405          * Port1  387 */
5406         i++;
5407         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5408         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5409         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5410         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5411 #endif
5412         /* Port CMs come here */
5413
5414         /* Port QM comes here */
5415 #ifdef BCM_ISCSI
5416         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5417         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5418
5419         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5420                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5421 #endif
5422         /* Port DQ comes here */
5423         /* Port BRB1 comes here */
5424         /* Port PRS comes here */
5425         /* Port TSDM comes here */
5426         /* Port CSDM comes here */
5427         /* Port USDM comes here */
5428         /* Port XSDM comes here */
5429         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5430                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5431         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5432                              port ? USEM_PORT1_END : USEM_PORT0_END);
5433         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5434                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5435         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5436                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5437         /* Port UPB comes here */
5438         /* Port XPB comes here */
5439
5440         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5441                              port ? PBF_PORT1_END : PBF_PORT0_END);
5442
5443         /* configure PBF to work without PAUSE, MTU 9000 */
5444         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5445
5446         /* update threshold */
5447         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5448         /* update init credit */
5449         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
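        /* Worked example: PBF credits are in 16-byte units (note the /16),
         * so the arbiter threshold is 9040/16 = 565 credits, i.e. 9040
         * bytes - a 9000-byte MTU plus some header margin.  The initial
         * credit is then 565 + 553 - 22 = 1096; the 553 and -22 terms are
         * taken from the code as-is and are presumably internal
         * buffering/latency corrections.
         */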
5450
5451         /* probe changes */
5452         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5453         msleep(5);
5454         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5455
5456 #ifdef BCM_ISCSI
5457         /* tell the searcher where the T2 table is */
5458         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5459
5460         wb_write[0] = U64_LO(bp->t2_mapping);
5461         wb_write[1] = U64_HI(bp->t2_mapping);
5462         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5463         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5464         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5465         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5466
5467         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5468         /* Port SRCH comes here */
5469 #endif
5470         /* Port CDU comes here */
5471         /* Port CFC comes here */
5472
5473         if (CHIP_IS_E1(bp)) {
5474                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5475                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5476         }
5477         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5478                              port ? HC_PORT1_END : HC_PORT0_END);
5479
5480         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5481                                     MISC_AEU_PORT0_START,
5482                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5483         /* init aeu_mask_attn_func_0/1:
5484          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5485          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5486          *             bits 4-7 are used for "per vn group attention" */
5487         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5488                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5489
5490         /* Port PXPCS comes here */
5491         /* Port EMAC0 comes here */
5492         /* Port EMAC1 comes here */
5493         /* Port DBU comes here */
5494         /* Port DBG comes here */
5495         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5496                              port ? NIG_PORT1_END : NIG_PORT0_END);
5497
5498         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5499
5500         if (CHIP_IS_E1H(bp)) {
5501                 u32 wsum;
5502                 struct cmng_struct_per_port m_cmng_port;
5503                 int vn;
5504
5505                 /* 0x2 disable e1hov, 0x1 enable */
5506                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5507                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5508
5509                 /* Init RATE SHAPING and FAIRNESS contexts.
5510                    Initialize as if there is a 10G link. */
5511                 wsum = bnx2x_calc_vn_wsum(bp);
5512                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5513                 if (IS_E1HMF(bp))
5514                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5515                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5516                                         wsum, 10000, &m_cmng_port);
5517         }
5518
5519         /* Port MCP comes here */
5520         /* Port DMAE comes here */
5521
5522         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5523         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5524                 /* add SPIO 5 to group 0 */
5525                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5526                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5527                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5528                 break;
5529
5530         default:
5531                 break;
5532         }
5533
5534         bnx2x__link_reset(bp);
5535
5536         return 0;
5537 }
5538
5539 #define ILT_PER_FUNC            (768/2)
5540 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5541 /* the phys address is shifted right 12 bits and has a valid bit (1)
5542    added as the 53rd bit;
5543    then, since this is a wide register(TM),
5544    we split it into two 32-bit writes
5545  */
5546 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5547 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5548 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5549 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
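/* Worked example (hypothetical address): for a DMA address
 * x = 0x0000001234567000,
 *   ONCHIP_ADDR1(x) = (x >> 12) & 0xFFFFFFFF = 0x01234567  (low word)
 *   ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44)  = 0x00100000  (high word)
 * bit 20 of the high word is bit 52 of the combined 64-bit value,
 * i.e. the "53rd bit" valid flag from the comment above.
 * PXP_ONE_ILT(3) packs first == last == 3 as (3 << 10) | 3 = 0xc03.
 */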
5550
5551 #define CNIC_ILT_LINES          0
5552
5553 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5554 {
5555         int reg;
5556
5557         if (CHIP_IS_E1H(bp))
5558                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5559         else /* E1 */
5560                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5561
5562         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5563 }
5564
5565 static int bnx2x_init_func(struct bnx2x *bp)
5566 {
5567         int port = BP_PORT(bp);
5568         int func = BP_FUNC(bp);
5569         int i;
5570
5571         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5572
5573         i = FUNC_ILT_BASE(func);
5574
5575         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5576         if (CHIP_IS_E1H(bp)) {
5577                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5578                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5579         } else /* E1 */
5580                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5581                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5582
5583
5584         if (CHIP_IS_E1H(bp)) {
5585                 for (i = 0; i < 9; i++)
5586                         bnx2x_init_block(bp,
5587                                          cm_start[func][i], cm_end[func][i]);
5588
5589                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5590                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5591         }
5592
5593         /* HC init per function */
5594         if (CHIP_IS_E1H(bp)) {
5595                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5596
5597                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5598                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5599         }
5600         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5601
5602         if (CHIP_IS_E1H(bp))
5603                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5604
5605         /* Reset PCIE errors for debug */
5606         REG_WR(bp, 0x2114, 0xffffffff);
5607         REG_WR(bp, 0x2120, 0xffffffff);
5608
5609         return 0;
5610 }
5611
5612 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5613 {
5614         int i, rc = 0;
5615
5616         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5617            BP_FUNC(bp), load_code);
5618
5619         bp->dmae_ready = 0;
5620         mutex_init(&bp->dmae_mutex);
5621         bnx2x_gunzip_init(bp);
5622
5623         switch (load_code) {
5624         case FW_MSG_CODE_DRV_LOAD_COMMON:
5625                 rc = bnx2x_init_common(bp);
5626                 if (rc)
5627                         goto init_hw_err;
5628                 /* no break */
5629
5630         case FW_MSG_CODE_DRV_LOAD_PORT:
5631                 bp->dmae_ready = 1;
5632                 rc = bnx2x_init_port(bp);
5633                 if (rc)
5634                         goto init_hw_err;
5635                 /* no break */
5636
5637         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5638                 bp->dmae_ready = 1;
5639                 rc = bnx2x_init_func(bp);
5640                 if (rc)
5641                         goto init_hw_err;
5642                 break;
5643
5644         default:
5645                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5646                 break;
5647         }
5648
5649         if (!BP_NOMCP(bp)) {
5650                 int func = BP_FUNC(bp);
5651
5652                 bp->fw_drv_pulse_wr_seq =
5653                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5654                                  DRV_PULSE_SEQ_MASK);
5655                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5656                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5657                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5658         } else
5659                 bp->func_stx = 0;
5660
5661         /* this needs to be done before gunzip end */
5662         bnx2x_zero_def_sb(bp);
5663         for_each_queue(bp, i)
5664                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5665
5666 init_hw_err:
5667         bnx2x_gunzip_end(bp);
5668
5669         return rc;
5670 }
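/* The deliberate switch fall-through above nests the init stages:
 * LOAD_COMMON (first driver on the chip) runs common + port + function
 * init, LOAD_PORT (first driver on this port) runs port + function,
 * and LOAD_FUNCTION (everything else) runs function init only.
 */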
5671
5672 /* send the MCP a request, block until there is a reply */
5673 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5674 {
5675         int func = BP_FUNC(bp);
5676         u32 seq = ++bp->fw_seq;
5677         u32 rc = 0;
5678         u32 cnt = 1;
5679         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5680
5681         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5682         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5683
5684         do {
5685                 /* let the FW do its magic ... */
5686                 msleep(delay);
5687
5688                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5689
5690                 /* Give the FW up to 2 seconds (200*10ms) */
5691         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5692
5693         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5694            cnt*delay, rc, seq);
5695
5696         /* is this a reply to our command? */
5697         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5698                 rc &= FW_MSG_CODE_MASK;
5699
5700         } else {
5701                 /* FW BUG! */
5702                 BNX2X_ERR("FW failed to respond!\n");
5703                 bnx2x_fw_dump(bp);
5704                 rc = 0;
5705         }
5706
5707         return rc;
5708 }
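/* Example exchange (hypothetical values): with seq = 0x0005 and
 * command = 0x10000000, the driver writes 0x10000005 to drv_mb_header.
 * Assuming the split implied by the masks (low 16 bits sequence, high
 * 16 bits code), a reply of fw_mb_header = 0x11230005 matches the
 * sequence, so bnx2x_fw_command() returns 0x11230000; any other low
 * 16 bits keeps the loop polling until the ~2s budget expires.
 */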
5709
5710 static void bnx2x_free_mem(struct bnx2x *bp)
5711 {
5712
5713 #define BNX2X_PCI_FREE(x, y, size) \
5714         do { \
5715                 if (x) { \
5716                         pci_free_consistent(bp->pdev, size, x, y); \
5717                         x = NULL; \
5718                         y = 0; \
5719                 } \
5720         } while (0)
5721
5722 #define BNX2X_FREE(x) \
5723         do { \
5724                 if (x) { \
5725                         vfree(x); \
5726                         x = NULL; \
5727                 } \
5728         } while (0)
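        /* Both helpers use the do { ... } while (0) idiom so that they
         * expand to a single statement and stay safe in unbraced
         * contexts, e.g.:
         *
         *     if (cleanup)
         *             BNX2X_FREE(fp->tx_buf_ring);
         *     else
         *             ...
         *
         * Without the wrapper the embedded if would steal the else.
         */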
5729
5730         int i;
5731
5732         /* fastpath */
5733         for_each_queue(bp, i) {
5734
5735                 /* Status blocks */
5736                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5737                                bnx2x_fp(bp, i, status_blk_mapping),
5738                                sizeof(struct host_status_block) +
5739                                sizeof(struct eth_tx_db_data));
5740
5741                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5742                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5743                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5744                                bnx2x_fp(bp, i, tx_desc_mapping),
5745                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5746
5747                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5748                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5749                                bnx2x_fp(bp, i, rx_desc_mapping),
5750                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5751
5752                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5753                                bnx2x_fp(bp, i, rx_comp_mapping),
5754                                sizeof(struct eth_fast_path_rx_cqe) *
5755                                NUM_RCQ_BD);
5756
5757                 /* SGE ring */
5758                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5759                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5760                                bnx2x_fp(bp, i, rx_sge_mapping),
5761                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5762         }
5763         /* end of fastpath */
5764
5765         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5766                        sizeof(struct host_def_status_block));
5767
5768         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5769                        sizeof(struct bnx2x_slowpath));
5770
5771 #ifdef BCM_ISCSI
5772         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5773         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5774         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5775         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5776 #endif
5777         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5778
5779 #undef BNX2X_PCI_FREE
5780 #undef BNX2X_FREE
5781 }
5782
5783 static int bnx2x_alloc_mem(struct bnx2x *bp)
5784 {
5785
5786 #define BNX2X_PCI_ALLOC(x, y, size) \
5787         do { \
5788                 x = pci_alloc_consistent(bp->pdev, size, y); \
5789                 if (x == NULL) \
5790                         goto alloc_mem_err; \
5791                 memset(x, 0, size); \
5792         } while (0)
5793
5794 #define BNX2X_ALLOC(x, size) \
5795         do { \
5796                 x = vmalloc(size); \
5797                 if (x == NULL) \
5798                         goto alloc_mem_err; \
5799                 memset(x, 0, size); \
5800         } while (0)
5801
5802         int i;
5803
5804         /* fastpath */
5805         for_each_queue(bp, i) {
5806                 bnx2x_fp(bp, i, bp) = bp;
5807
5808                 /* Status blocks */
5809                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5810                                 &bnx2x_fp(bp, i, status_blk_mapping),
5811                                 sizeof(struct host_status_block) +
5812                                 sizeof(struct eth_tx_db_data));
5813
5814                 bnx2x_fp(bp, i, hw_tx_prods) =
5815                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5816
5817                 bnx2x_fp(bp, i, tx_prods_mapping) =
5818                                 bnx2x_fp(bp, i, status_blk_mapping) +
5819                                 sizeof(struct host_status_block);
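                /* The status block and the Tx doorbell data share one
                 * DMA allocation; hw_tx_prods simply points just past
                 * the status block:
                 *
                 *   status_blk_mapping -> [host_status_block][eth_tx_db_data]
                 *                                            ^ hw_tx_prods
                 */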
5820
5821                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5822                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5823                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5824                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5825                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5826                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5827
5828                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5829                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5830                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5831                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5832                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5833
5834                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5835                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5836                                 sizeof(struct eth_fast_path_rx_cqe) *
5837                                 NUM_RCQ_BD);
5838
5839                 /* SGE ring */
5840                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5841                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5842                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5843                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5844                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5845         }
5846         /* end of fastpath */
5847
5848         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5849                         sizeof(struct host_def_status_block));
5850
5851         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5852                         sizeof(struct bnx2x_slowpath));
5853
5854 #ifdef BCM_ISCSI
5855         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5856
5857         /* Initialize T1 */
5858         for (i = 0; i < 64*1024; i += 64) {
5859                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5860                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5861         }
5862
5863         /* allocate searcher T2 table;
5864            we allocate 1/4 of alloc num for T2
5865            (which is not entered into the ILT) */
5866         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5867
5868         /* Initialize T2 */
5869         for (i = 0; i < 16*1024; i += 64)
5870                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5871
5872         /* now fixup the last line in the block to point to the next block */
5873         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5874
5875         /* Timer block array (MAX_CONN*8), phys uncached; for now 1024 conns */
5876         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5877
5878         /* QM queues (128*MAX_CONN) */
5879         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5880 #endif
5881
5882         /* Slow path ring */
5883         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5884
5885         return 0;
5886
5887 alloc_mem_err:
5888         bnx2x_free_mem(bp);
5889         return -ENOMEM;
5890
5891 #undef BNX2X_PCI_ALLOC
5892 #undef BNX2X_ALLOC
5893 }
5894
5895 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5896 {
5897         int i;
5898
5899         for_each_queue(bp, i) {
5900                 struct bnx2x_fastpath *fp = &bp->fp[i];
5901
5902                 u16 bd_cons = fp->tx_bd_cons;
5903                 u16 sw_prod = fp->tx_pkt_prod;
5904                 u16 sw_cons = fp->tx_pkt_cons;
5905
5906                 while (sw_cons != sw_prod) {
5907                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5908                         sw_cons++;
5909                 }
5910         }
5911 }
5912
5913 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5914 {
5915         int i, j;
5916
5917         for_each_queue(bp, j) {
5918                 struct bnx2x_fastpath *fp = &bp->fp[j];
5919
5920                 for (i = 0; i < NUM_RX_BD; i++) {
5921                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5922                         struct sk_buff *skb = rx_buf->skb;
5923
5924                         if (skb == NULL)
5925                                 continue;
5926
5927                         pci_unmap_single(bp->pdev,
5928                                          pci_unmap_addr(rx_buf, mapping),
5929                                          bp->rx_buf_use_size,
5930                                          PCI_DMA_FROMDEVICE);
5931
5932                         rx_buf->skb = NULL;
5933                         dev_kfree_skb(skb);
5934                 }
5935                 if (!fp->disable_tpa)
5936                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5937                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5938                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5939         }
5940 }
5941
5942 static void bnx2x_free_skbs(struct bnx2x *bp)
5943 {
5944         bnx2x_free_tx_skbs(bp);
5945         bnx2x_free_rx_skbs(bp);
5946 }
5947
5948 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5949 {
5950         int i, offset = 1;
5951
5952         free_irq(bp->msix_table[0].vector, bp->dev);
5953         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5954            bp->msix_table[0].vector);
5955
5956         for_each_queue(bp, i) {
5957                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5958                    "state %x\n", i, bp->msix_table[i + offset].vector,
5959                    bnx2x_fp(bp, i, state));
5960
5961                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5962                         BNX2X_ERR("IRQ of fp #%d being freed while "
5963                                   "state != closed\n", i);
5964
5965                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5966         }
5967 }
5968
5969 static void bnx2x_free_irq(struct bnx2x *bp)
5970 {
5971         if (bp->flags & USING_MSIX_FLAG) {
5972                 bnx2x_free_msix_irqs(bp);
5973                 pci_disable_msix(bp->pdev);
5974                 bp->flags &= ~USING_MSIX_FLAG;
5975
5976         } else
5977                 free_irq(bp->pdev->irq, bp->dev);
5978 }
5979
5980 static int bnx2x_enable_msix(struct bnx2x *bp)
5981 {
5982         int i, rc, offset;
5983
5984         bp->msix_table[0].entry = 0;
5985         offset = 1;
5986         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5987
5988         for_each_queue(bp, i) {
5989                 int igu_vec = offset + i + BP_L_ID(bp);
5990
5991                 bp->msix_table[i + offset].entry = igu_vec;
5992                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5993                    "(fastpath #%u)\n", i + offset, igu_vec, i);
5994         }
5995
5996         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997                              bp->num_queues + offset);
5998         if (rc) {
5999                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6000                 return -1;
6001         }
6002         bp->flags |= USING_MSIX_FLAG;
6003
6004         return 0;
6005 }
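/* Resulting vector layout, e.g. with num_queues = 4 and BP_L_ID(bp) = 0:
 * msix_table[0].entry = 0 services the slowpath, and msix_table[1..4]
 * get IGU vectors 1..4, one per fastpath ring.  A non-zero BP_L_ID
 * (another function on the same chip) shifts only the IGU vector
 * numbers, not the table indices.
 */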
6006
6007 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6008 {
6009         int i, rc, offset = 1;
6010
6011         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6012                          bp->dev->name, bp->dev);
6013         if (rc) {
6014                 BNX2X_ERR("request sp irq failed\n");
6015                 return -EBUSY;
6016         }
6017
6018         for_each_queue(bp, i) {
6019                 rc = request_irq(bp->msix_table[i + offset].vector,
6020                                  bnx2x_msix_fp_int, 0,
6021                                  bp->dev->name, &bp->fp[i]);
6022                 if (rc) {
6023                         BNX2X_ERR("request fp #%d irq failed  rc %d\n",
6024                                   i + offset, rc);
6025                         bnx2x_free_msix_irqs(bp);
6026                         return -EBUSY;
6027                 }
6028
6029                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6030         }
6031
6032         return 0;
6033 }
6034
6035 static int bnx2x_req_irq(struct bnx2x *bp)
6036 {
6037         int rc;
6038
6039         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6040                          bp->dev->name, bp->dev);
6041         if (!rc)
6042                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6043
6044         return rc;
6045 }
6046
6047 /*
6048  * Init service functions
6049  */
6050
6051 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6052 {
6053         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6054         int port = BP_PORT(bp);
6055
6056         /* CAM allocation
6057          * unicasts 0-31:port0 32-63:port1
6058          * multicast 64-127:port0 128-191:port1
6059          */
6060         config->hdr.length_6b = 2;
6061         config->hdr.offset = port ? 31 : 0;
6062         config->hdr.client_id = BP_CL_ID(bp);
6063         config->hdr.reserved1 = 0;
6064
6065         /* primary MAC */
6066         config->config_table[0].cam_entry.msb_mac_addr =
6067                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6068         config->config_table[0].cam_entry.middle_mac_addr =
6069                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6070         config->config_table[0].cam_entry.lsb_mac_addr =
6071                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
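        /* Example (assuming a little-endian host): for dev_addr
         * 00:11:22:33:44:55, *(u16 *)&dev_addr[0] reads 0x1100 and
         * swab16() turns it into 0x0011, so the CAM halves come out in
         * network order: msb 0x0011, middle 0x2233, lsb 0x4455.
         */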
6072         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6073         if (set)
6074                 config->config_table[0].target_table_entry.flags = 0;
6075         else
6076                 CAM_INVALIDATE(config->config_table[0]);
6077         config->config_table[0].target_table_entry.client_id = 0;
6078         config->config_table[0].target_table_entry.vlan_id = 0;
6079
6080         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6081            (set ? "setting" : "clearing"),
6082            config->config_table[0].cam_entry.msb_mac_addr,
6083            config->config_table[0].cam_entry.middle_mac_addr,
6084            config->config_table[0].cam_entry.lsb_mac_addr);
6085
6086         /* broadcast */
6087         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6088         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6089         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6090         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6091         if (set)
6092                 config->config_table[1].target_table_entry.flags =
6093                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6094         else
6095                 CAM_INVALIDATE(config->config_table[1]);
6096         config->config_table[1].target_table_entry.client_id = 0;
6097         config->config_table[1].target_table_entry.vlan_id = 0;
6098
6099         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6100                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6101                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6102 }
6103
6104 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6105 {
6106         struct mac_configuration_cmd_e1h *config =
6107                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6108
6109         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6110                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6111                 return;
6112         }
6113
6114         /* CAM allocation for E1H
6115          * unicasts: by func number
6116          * multicast: 20+FUNC*20, 20 each
6117          */
6118         config->hdr.length_6b = 1;
6119         config->hdr.offset = BP_FUNC(bp);
6120         config->hdr.client_id = BP_CL_ID(bp);
6121         config->hdr.reserved1 = 0;
6122
6123         /* primary MAC */
6124         config->config_table[0].msb_mac_addr =
6125                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6126         config->config_table[0].middle_mac_addr =
6127                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6128         config->config_table[0].lsb_mac_addr =
6129                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6130         config->config_table[0].client_id = BP_L_ID(bp);
6131         config->config_table[0].vlan_id = 0;
6132         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6133         if (set)
6134                 config->config_table[0].flags = BP_PORT(bp);
6135         else
6136                 config->config_table[0].flags =
6137                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6138
6139         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6140            (set ? "setting" : "clearing"),
6141            config->config_table[0].msb_mac_addr,
6142            config->config_table[0].middle_mac_addr,
6143            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6144
6145         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6146                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6147                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6148 }
6149
6150 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6151                              int *state_p, int poll)
6152 {
6153         /* can take a while if any port is running */
6154         int cnt = 500;
6155
6156         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6157            poll ? "polling" : "waiting", state, idx);
6158
6159         might_sleep();
6160         while (cnt--) {
6161                 if (poll) {
6162                         bnx2x_rx_int(bp->fp, 10);
6163                         /* if index is different from 0
6164                          * the reply for some commands will
6165                          * be on the non-default queue
6166                          */
6167                         if (idx)
6168                                 bnx2x_rx_int(&bp->fp[idx], 10);
6169                 }
6170
6171                 mb(); /* state is changed by bnx2x_sp_event() */
6172                 if (*state_p == state)
6173                         return 0;
6174
6175                 msleep(1);
6176         }
6177
6178         /* timeout! */
6179         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6180                   poll ? "polling" : "waiting", state, idx);
6181 #ifdef BNX2X_STOP_ON_ERROR
6182         bnx2x_panic();
6183 #endif
6184
6185         return -EBUSY;
6186 }
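/* With cnt = 500 and msleep(1) per iteration the wait budget is at
 * least ~500ms (typically more, since msleep rounds up to a jiffy).
 * In poll mode the loop also services the Rx rings itself, because
 * the ramrod completion may arrive while interrupts are not yet (or
 * no longer) enabled.
 */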
6187
6188 static int bnx2x_setup_leading(struct bnx2x *bp)
6189 {
6190         int rc;
6191
6192         /* reset IGU state */
6193         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6194
6195         /* SETUP ramrod */
6196         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6197
6198         /* Wait for completion */
6199         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6200
6201         return rc;
6202 }
6203
6204 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6205 {
6206         /* reset IGU state */
6207         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6208
6209         /* SETUP ramrod */
6210         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6211         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6212
6213         /* Wait for completion */
6214         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6215                                  &(bp->fp[index].state), 0);
6216 }
6217
6218 static int bnx2x_poll(struct napi_struct *napi, int budget);
6219 static void bnx2x_set_rx_mode(struct net_device *dev);
6220
6221 /* must be called with rtnl_lock */
6222 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6223 {
6224         u32 load_code;
6225         int i, rc;
6226
6227 #ifdef BNX2X_STOP_ON_ERROR
6228         if (unlikely(bp->panic))
6229                 return -EPERM;
6230 #endif
6231
6232         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6233
6234         /* Send LOAD_REQUEST command to MCP.
6235            Returns the type of LOAD command:
6236            if it is the first port to be initialized,
6237            common blocks should be initialized, otherwise not
6238         */
6239         if (!BP_NOMCP(bp)) {
6240                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6241                 if (!load_code) {
6242                         BNX2X_ERR("MCP response failure, aborting\n");
6243                         return -EBUSY;
6244                 }
6245                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6246                         return -EBUSY; /* other port in diagnostic mode */
6247
6248         } else {
6249                 int port = BP_PORT(bp);
6250
6251                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6252                    load_count[0], load_count[1], load_count[2]);
6253                 load_count[0]++;
6254                 load_count[1 + port]++;
6255                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6256                    load_count[0], load_count[1], load_count[2]);
6257                 if (load_count[0] == 1)
6258                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6259                 else if (load_count[1 + port] == 1)
6260                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6261                 else
6262                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6263         }
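        /* Example of the no-MCP bookkeeping: the first load on port 1 of
         * an otherwise idle chip takes load_count from {0, 0, 0} to
         * {1, 0, 1}, so load_count[0] == 1 selects LOAD_COMMON; a second
         * function on the same port would yield {2, 0, 2} and fall
         * through to LOAD_FUNCTION.
         */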
6264
6265         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6266             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6267                 bp->port.pmf = 1;
6268         else
6269                 bp->port.pmf = 0;
6270         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6271
6272         /* if we can't use MSI-X we only need one fp,
6273          * so try to enable MSI-X with the requested number of fp's
6274          * and fall back to INT#A with one fp
6275          */
6276         if (use_inta) {
6277                 bp->num_queues = 1;
6278
6279         } else {
6280                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6281                         /* user requested number */
6282                         bp->num_queues = use_multi;
6283
6284                 else if (use_multi)
6285                         bp->num_queues = min_t(u32, num_online_cpus(),
6286                                                BP_MAX_QUEUES(bp));
6287                 else
6288                         bp->num_queues = 1;
6289
6290                 if (bnx2x_enable_msix(bp)) {
6291                         /* failed to enable MSI-X */
6292                         bp->num_queues = 1;
6293                         if (use_multi)
6294                                 BNX2X_ERR("Multi requested but failed"
6295                                           " to enable MSI-X\n");
6296                 }
6297         }
6298         DP(NETIF_MSG_IFUP,
6299            "set number of queues to %d\n", bp->num_queues);
6300
6301         if (bnx2x_alloc_mem(bp))
6302                 return -ENOMEM;
6303
6304         for_each_queue(bp, i)
6305                 bnx2x_fp(bp, i, disable_tpa) =
6306                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6307
6308         if (bp->flags & USING_MSIX_FLAG) {
6309                 rc = bnx2x_req_msix_irqs(bp);
6310                 if (rc) {
6311                         pci_disable_msix(bp->pdev);
6312                         goto load_error;
6313                 }
6314         } else {
6315                 bnx2x_ack_int(bp);
6316                 rc = bnx2x_req_irq(bp);
6317                 if (rc) {
6318                         BNX2X_ERR("IRQ request failed, aborting\n");
6319                         goto load_error;
6320                 }
6321         }
6322
6323         for_each_queue(bp, i)
6324                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6325                                bnx2x_poll, 128);
6326
6327         /* Initialize HW */
6328         rc = bnx2x_init_hw(bp, load_code);
6329         if (rc) {
6330                 BNX2X_ERR("HW init failed, aborting\n");
6331                 goto load_error;
6332         }
6333
6334         /* Setup NIC internals and enable interrupts */
6335         bnx2x_nic_init(bp, load_code);
6336
6337         /* Send LOAD_DONE command to MCP */
6338         if (!BP_NOMCP(bp)) {
6339                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6340                 if (!load_code) {
6341                         BNX2X_ERR("MCP response failure, aborting\n");
6342                         rc = -EBUSY;
6343                         goto load_int_disable;
6344                 }
6345         }
6346
6347         bnx2x_stats_init(bp);
6348
6349         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6350
6351         /* Enable Rx interrupt handling before sending the ramrod
6352            as it's completed on Rx FP queue */
6353         for_each_queue(bp, i)
6354                 napi_enable(&bnx2x_fp(bp, i, napi));
6355
6356         /* Enable interrupt handling */
6357         atomic_set(&bp->intr_sem, 0);
6358
6359         rc = bnx2x_setup_leading(bp);
6360         if (rc) {
6361                 BNX2X_ERR("Setup leading failed!\n");
6362                 goto load_stop_netif;
6363         }
6364
6365         if (CHIP_IS_E1H(bp))
6366                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6367                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6368                         bp->state = BNX2X_STATE_DISABLED;
6369                 }
6370
6371         if (bp->state == BNX2X_STATE_OPEN)
6372                 for_each_nondefault_queue(bp, i) {
6373                         rc = bnx2x_setup_multi(bp, i);
6374                         if (rc)
6375                                 goto load_stop_netif;
6376                 }
6377
6378         if (CHIP_IS_E1(bp))
6379                 bnx2x_set_mac_addr_e1(bp, 1);
6380         else
6381                 bnx2x_set_mac_addr_e1h(bp, 1);
6382
6383         if (bp->port.pmf)
6384                 bnx2x_initial_phy_init(bp);
6385
6386         /* Start fast path */
6387         switch (load_mode) {
6388         case LOAD_NORMAL:
6389                 /* Tx queue should only be re-enabled */
6390                 netif_wake_queue(bp->dev);
6391                 bnx2x_set_rx_mode(bp->dev);
6392                 break;
6393
6394         case LOAD_OPEN:
6395                 netif_start_queue(bp->dev);
6396                 bnx2x_set_rx_mode(bp->dev);
6397                 if (bp->flags & USING_MSIX_FLAG)
6398                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6399                                bp->dev->name);
6400                 break;
6401
6402         case LOAD_DIAG:
6403                 bnx2x_set_rx_mode(bp->dev);
6404                 bp->state = BNX2X_STATE_DIAG;
6405                 break;
6406
6407         default:
6408                 break;
6409         }
6410
6411         if (!bp->port.pmf)
6412                 bnx2x__link_status_update(bp);
6413
6414         /* start the timer */
6415         mod_timer(&bp->timer, jiffies + bp->current_interval);
6416
6417
6418         return 0;
6419
6420 load_stop_netif:
6421         for_each_queue(bp, i)
6422                 napi_disable(&bnx2x_fp(bp, i, napi));
6423
6424 load_int_disable:
6425         bnx2x_int_disable_sync(bp);
6426
6427         /* Release IRQs */
6428         bnx2x_free_irq(bp);
6429
6430         /* Free SKBs, SGEs, TPA pool and driver internals */
6431         bnx2x_free_skbs(bp);
6432         for_each_queue(bp, i)
6433                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6434                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6435 load_error:
6436         bnx2x_free_mem(bp);
6437
6438         /* TBD we really need to reset the chip
6439            if we want to recover from this */
6440         return rc;
6441 }
6442
6443 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6444 {
6445         int rc;
6446
6447         /* halt the connection */
6448         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6449         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6450
6451         /* Wait for completion */
6452         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6453                                &(bp->fp[index].state), 1);
6454         if (rc) /* timeout */
6455                 return rc;
6456
6457         /* delete cfc entry */
6458         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6459
6460         /* Wait for completion */
6461         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6462                                &(bp->fp[index].state), 1);
6463         return rc;
6464 }
6465
6466 static int bnx2x_stop_leading(struct bnx2x *bp)
6467 {
6468         u16 dsb_sp_prod_idx;
6469         /* if the other port is handling traffic,
6470            this can take a lot of time */
6471         int cnt = 500;
6472         int rc;
6473
6474         might_sleep();
6475
6476         /* Send HALT ramrod */
6477         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6478         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6479
6480         /* Wait for completion */
6481         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6482                                &(bp->fp[0].state), 1);
6483         if (rc) /* timeout */
6484                 return rc;
6485
6486         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6487
6488         /* Send PORT_DELETE ramrod */
6489         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6490
6491         /* Wait for completion to arrive on the default status block;
6492            we are going to reset the chip anyway,
6493            so there is not much to do if this times out
6494          */
6495         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6496                 if (!cnt) {
6497                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6498                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6499                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6500 #ifdef BNX2X_STOP_ON_ERROR
6501                         bnx2x_panic();
6502 #else
6503                         rc = -EBUSY;
6504 #endif
6505                         break;
6506                 }
6507                 cnt--;
6508                 msleep(1);
6509         }
6510         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6511         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6512
6513         return rc;
6514 }
6515
6516 static void bnx2x_reset_func(struct bnx2x *bp)
6517 {
6518         int port = BP_PORT(bp);
6519         int func = BP_FUNC(bp);
6520         int base, i;
6521
6522         /* Configure IGU */
6523         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6524         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6525
6526         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6527
6528         /* Clear ILT */
6529         base = FUNC_ILT_BASE(func);
6530         for (i = base; i < base + ILT_PER_FUNC; i++)
6531                 bnx2x_ilt_wr(bp, i, 0);
6532 }
6533
6534 static void bnx2x_reset_port(struct bnx2x *bp)
6535 {
6536         int port = BP_PORT(bp);
6537         u32 val;
6538
6539         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6540
6541         /* Do not rcv packets to BRB */
6542         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6543         /* Do not direct rcv packets that are not for MCP to the BRB */
6544         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6545                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6546
6547         /* Configure AEU */
6548         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6549
6550         msleep(100);
6551         /* Check for BRB port occupancy */
6552         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6553         if (val)
6554                 DP(NETIF_MSG_IFDOWN,
6555                    "BRB1 is not empty  %d blocks are occupied\n", val);
6556
6557         /* TODO: Close Doorbell port? */
6558 }
6559
6560 static void bnx2x_reset_common(struct bnx2x *bp)
6561 {
6562         /* reset_common */
6563         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6564                0xd3ffff7f);
6565         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6566 }
6567
6568 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6569 {
6570         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6571            BP_FUNC(bp), reset_code);
6572
6573         switch (reset_code) {
6574         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6575                 bnx2x_reset_port(bp);
6576                 bnx2x_reset_func(bp);
6577                 bnx2x_reset_common(bp);
6578                 break;
6579
6580         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6581                 bnx2x_reset_port(bp);
6582                 bnx2x_reset_func(bp);
6583                 break;
6584
6585         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6586                 bnx2x_reset_func(bp);
6587                 break;
6588
6589         default:
6590                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6591                 break;
6592         }
6593 }
6594
6595 /* must be called with rtnl_lock */
6596 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6597 {
6598         int port = BP_PORT(bp);
6599         u32 reset_code = 0;
6600         int i, cnt, rc;
6601
6602         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6603
6604         bp->rx_mode = BNX2X_RX_MODE_NONE;
6605         bnx2x_set_storm_rx_mode(bp);
6606
6607         if (netif_running(bp->dev)) {
6608                 netif_tx_disable(bp->dev);
6609                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6610         }
6611
6612         del_timer_sync(&bp->timer);
6613         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6614                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6615         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6616
6617         /* Wait until tx fast path tasks complete */
6618         for_each_queue(bp, i) {
6619                 struct bnx2x_fastpath *fp = &bp->fp[i];
6620
6621                 cnt = 1000;
6622                 smp_rmb();
6623                 while (BNX2X_HAS_TX_WORK(fp)) {
6624
6625                         if (!netif_running(bp->dev))
6626                                 bnx2x_tx_int(fp, 1000);
6627
6628                         if (!cnt) {
6629                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6630                                           i);
6631 #ifdef BNX2X_STOP_ON_ERROR
6632                                 bnx2x_panic();
6633                                 return -EBUSY;
6634 #else
6635                                 break;
6636 #endif
6637                         }
6638                         cnt--;
6639                         msleep(1);
6640                         smp_rmb();
6641                 }
6642         }
6643
6644         /* Give HW time to discard old tx messages */
6645         msleep(1);
6646
6647         for_each_queue(bp, i)
6648                 napi_disable(&bnx2x_fp(bp, i, napi));
6649         /* Disable interrupts after Tx and Rx are disabled on stack level */
6650         bnx2x_int_disable_sync(bp);
6651
6652         /* Release IRQs */
6653         bnx2x_free_irq(bp);
6654
6655         if (unload_mode == UNLOAD_NORMAL)
6656                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6657
6658         else if (bp->flags & NO_WOL_FLAG) {
6659                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6660                 if (CHIP_IS_E1H(bp))
6661                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6662
6663         } else if (bp->wol) {
6664                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6665                 u8 *mac_addr = bp->dev->dev_addr;
6666                 u32 val;
6667                 /* The mac address is written to entries 1-4 to
6668                    preserve entry 0 which is used by the PMF */
6669                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6670
6671                 val = (mac_addr[0] << 8) | mac_addr[1];
6672                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6673
6674                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6675                       (mac_addr[4] << 8) | mac_addr[5];
6676                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6677
6678                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6679
6680         } else
6681                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6682
6683         if (CHIP_IS_E1(bp)) {
6684                 struct mac_configuration_cmd *config =
6685                                                 bnx2x_sp(bp, mcast_config);
6686
6687                 bnx2x_set_mac_addr_e1(bp, 0);
6688
6689                 for (i = 0; i < config->hdr.length_6b; i++)
6690                         CAM_INVALIDATE(config->config_table[i]);
6691
6692                 config->hdr.length_6b = i;
6693                 if (CHIP_REV_IS_SLOW(bp))
6694                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6695                 else
6696                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6697                 config->hdr.client_id = BP_CL_ID(bp);
6698                 config->hdr.reserved1 = 0;
6699
6700                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6701                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6702                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6703
6704         } else { /* E1H */
6705                 bnx2x_set_mac_addr_e1h(bp, 0);
6706
6707                 for (i = 0; i < MC_HASH_SIZE; i++)
6708                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6709         }
6710
6711         if (CHIP_IS_E1H(bp))
6712                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6713
6714         /* Close multi and leading connections;
6715            completions for ramrods are collected in a synchronous way */
6716         for_each_nondefault_queue(bp, i)
6717                 if (bnx2x_stop_multi(bp, i))
6718                         goto unload_error;
6719
6720         rc = bnx2x_stop_leading(bp);
6721         if (rc) {
6722                 BNX2X_ERR("Stop leading failed!\n");
6723 #ifdef BNX2X_STOP_ON_ERROR
6724                 return -EBUSY;
6725 #else
6726                 goto unload_error;
6727 #endif
6728         }
6729
6730 unload_error:
6731         if (!BP_NOMCP(bp))
6732                 reset_code = bnx2x_fw_command(bp, reset_code);
6733         else {
6734                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6735                    load_count[0], load_count[1], load_count[2]);
6736                 load_count[0]--;
6737                 load_count[1 + port]--;
6738                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6739                    load_count[0], load_count[1], load_count[2]);
6740                 if (load_count[0] == 0)
6741                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6742                 else if (load_count[1 + port] == 0)
6743                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6744                 else
6745                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6746         }
6747
6748         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6749             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6750                 bnx2x__link_reset(bp);
6751
6752         /* Reset the chip */
6753         bnx2x_reset_chip(bp, reset_code);
6754
6755         /* Report UNLOAD_DONE to MCP */
6756         if (!BP_NOMCP(bp))
6757                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6758
6759         /* Free SKBs, SGEs, TPA pool and driver internals */
6760         bnx2x_free_skbs(bp);
6761         for_each_queue(bp, i)
6762                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6763                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6764         bnx2x_free_mem(bp);
6765
6766         bp->state = BNX2X_STATE_CLOSED;
6767
6768         netif_carrier_off(bp->dev);
6769
6770         return 0;
6771 }
6772
6773 static void bnx2x_reset_task(struct work_struct *work)
6774 {
6775         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6776
6777 #ifdef BNX2X_STOP_ON_ERROR
6778         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6779                   " so reset not done to allow debug dump,\n"
6780          KERN_ERR " you will need to reboot when done\n");
6781         return;
6782 #endif
6783
6784         rtnl_lock();
6785
6786         if (!netif_running(bp->dev))
6787                 goto reset_task_exit;
6788
6789         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6790         bnx2x_nic_load(bp, LOAD_NORMAL);
6791
6792 reset_task_exit:
6793         rtnl_unlock();
6794 }
6795
6796 /* end of nic load/unload */
6797
6798 /* ethtool_ops */
6799
6800 /*
6801  * Init service functions
6802  */
6803
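/* If a pre-OS UNDI (PXE) driver left the device initialized - detected
 * below via MISC_REG_UNPREPARED and the doorbell CID offset still
 * holding UNDI's value 0x7 - coordinate an unload with the MCP,
 * quiesce BRB input traffic, reset the chip and restore the NIG
 * port-swap strapping before this driver takes over.
 */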
6804 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6805 {
6806         u32 val;
6807
6808         /* Check if there is any driver already loaded */
6809         val = REG_RD(bp, MISC_REG_UNPREPARED);
6810         if (val == 0x1) {
6811                 /* Check if it is the UNDI driver
6812                  * UNDI driver initializes CID offset for normal bell to 0x7
6813                  */
6814                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6815                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6816                 if (val == 0x7) {
6817                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6818                         /* save our func */
6819                         int func = BP_FUNC(bp);
6820                         u32 swap_en;
6821                         u32 swap_val;
6822
6823                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6824
6825                         /* try unload UNDI on port 0 */
6826                         bp->func = 0;
6827                         bp->fw_seq =
6828                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6829                                 DRV_MSG_SEQ_NUMBER_MASK);
6830                         reset_code = bnx2x_fw_command(bp, reset_code);
6831
6832                         /* if UNDI is loaded on the other port */
6833                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6834
6835                                 /* send "DONE" for previous unload */
6836                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6837
6838                                 /* unload UNDI on port 1 */
6839                                 bp->func = 1;
6840                                 bp->fw_seq =
6841                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6842                                 DRV_MSG_SEQ_NUMBER_MASK);
6843                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6844
6845                                 bnx2x_fw_command(bp, reset_code);
6846                         }
6847
6848                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6849                                     HC_REG_CONFIG_0), 0x1000);
6850
6851                         /* close input traffic and wait for it */
6852                         /* Do not rcv packets to BRB */
6853                         REG_WR(bp,
6854                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6855                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6856                         /* Do not direct rcv packets that are not for MCP to
6857                          * the BRB */
6858                         REG_WR(bp,
6859                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6860                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6861                         /* clear AEU */
6862                         REG_WR(bp,
6863                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6864                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6865                         msleep(10);
6866
6867                         /* save NIG port swap info */
6868                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6869                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6870                         /* reset device */
6871                         REG_WR(bp,
6872                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6873                                0xd3ffffff);
6874                         REG_WR(bp,
6875                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6876                                0x1403);
6877                         /* take the NIG out of reset and restore swap values */
6878                         REG_WR(bp,
6879                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6880                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6881                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6882                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6883
6884                         /* send unload done to the MCP */
6885                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6886
6887                         /* restore our func and fw_seq */
6888                         bp->func = func;
6889                         bp->fw_seq =
6890                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6891                                 DRV_MSG_SEQ_NUMBER_MASK);
6892                 }
6893                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6894         }
6895 }
6896
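/* Assemble the 32-bit chip id from four MISC registers (chip num in
 * bits 31:16, rev in 15:12, metal in 11:4, bond id in 3:0), then read
 * the flash size and shmem base and validate the MCP signature.  A
 * shmem base outside [0xA0000, 0xC0000) means no MCP is running, so
 * NO_MCP_FLAG is set and the rest of shared memory is ignored.
 */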
6897 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6898 {
6899         u32 val, val2, val3, val4, id;
6900
6901         /* Get the chip revision id and number. */
6902         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6903         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6904         id = ((val & 0xffff) << 16);
6905         val = REG_RD(bp, MISC_REG_CHIP_REV);
6906         id |= ((val & 0xf) << 12);
6907         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6908         id |= ((val & 0xff) << 4);
6909         val = REG_RD(bp, MISC_REG_BOND_ID);
6910         id |= (val & 0xf);
6911         bp->common.chip_id = id;
6912         bp->link_params.chip_id = bp->common.chip_id;
6913         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6914
6915         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6916         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6917                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6918         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6919                        bp->common.flash_size, bp->common.flash_size);
6920
6921         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6922         bp->link_params.shmem_base = bp->common.shmem_base;
6923         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6924
6925         if (!bp->common.shmem_base ||
6926             (bp->common.shmem_base < 0xA0000) ||
6927             (bp->common.shmem_base >= 0xC0000)) {
6928                 BNX2X_DEV_INFO("MCP not active\n");
6929                 bp->flags |= NO_MCP_FLAG;
6930                 return;
6931         }
6932
6933         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6934         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6935                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6936                 BNX2X_ERR("BAD MCP validity signature\n");
6937
6938         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6939         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6940
6941         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6942                        bp->common.hw_config, bp->common.board);
6943
6944         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6945                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6946                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6947
6948         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6949         bp->common.bc_ver = val;
6950         BNX2X_DEV_INFO("bc_ver %X\n", val);
6951         if (val < BNX2X_BC_VER) {
6952                 /* for now only warn
6953                  * later we might need to enforce this */
6954                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6955                           " please upgrade BC\n", BNX2X_BC_VER, val);
6956         }
6957         BNX2X_DEV_INFO("%sWoL Capable\n",
6958                        (bp->flags & NO_WOL_FLAG)? "Not " : "");
6959
6960         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6961         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6962         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6963         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6964
6965         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6966                val, val2, val3, val4);
6967 }
6968
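/* Build the ethtool SUPPORTED_* mask from the external PHY type found
 * in NVRAM (SerDes types for a 1G switch config, XGXS types for 10G),
 * then clear every speed that the NVRAM speed_cap_mask does not allow.
 */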
6969 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6970                                                     u32 switch_cfg)
6971 {
6972         int port = BP_PORT(bp);
6973         u32 ext_phy_type;
6974
6975         switch (switch_cfg) {
6976         case SWITCH_CFG_1G:
6977                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6978
6979                 ext_phy_type =
6980                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6981                 switch (ext_phy_type) {
6982                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6983                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6984                                        ext_phy_type);
6985
6986                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6987                                                SUPPORTED_10baseT_Full |
6988                                                SUPPORTED_100baseT_Half |
6989                                                SUPPORTED_100baseT_Full |
6990                                                SUPPORTED_1000baseT_Full |
6991                                                SUPPORTED_2500baseX_Full |
6992                                                SUPPORTED_TP |
6993                                                SUPPORTED_FIBRE |
6994                                                SUPPORTED_Autoneg |
6995                                                SUPPORTED_Pause |
6996                                                SUPPORTED_Asym_Pause);
6997                         break;
6998
6999                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7000                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7001                                        ext_phy_type);
7002
7003                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7004                                                SUPPORTED_10baseT_Full |
7005                                                SUPPORTED_100baseT_Half |
7006                                                SUPPORTED_100baseT_Full |
7007                                                SUPPORTED_1000baseT_Full |
7008                                                SUPPORTED_TP |
7009                                                SUPPORTED_FIBRE |
7010                                                SUPPORTED_Autoneg |
7011                                                SUPPORTED_Pause |
7012                                                SUPPORTED_Asym_Pause);
7013                         break;
7014
7015                 default:
7016                         BNX2X_ERR("NVRAM config error. "
7017                                   "BAD SerDes ext_phy_config 0x%x\n",
7018                                   bp->link_params.ext_phy_config);
7019                         return;
7020                 }
7021
7022                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7023                                            port*0x10);
7024                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7025                 break;
7026
7027         case SWITCH_CFG_10G:
7028                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7029
7030                 ext_phy_type =
7031                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7032                 switch (ext_phy_type) {
7033                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7034                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7035                                        ext_phy_type);
7036
7037                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7038                                                SUPPORTED_10baseT_Full |
7039                                                SUPPORTED_100baseT_Half |
7040                                                SUPPORTED_100baseT_Full |
7041                                                SUPPORTED_1000baseT_Full |
7042                                                SUPPORTED_2500baseX_Full |
7043                                                SUPPORTED_10000baseT_Full |
7044                                                SUPPORTED_TP |
7045                                                SUPPORTED_FIBRE |
7046                                                SUPPORTED_Autoneg |
7047                                                SUPPORTED_Pause |
7048                                                SUPPORTED_Asym_Pause);
7049                         break;
7050
7051                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7052                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7053                                        ext_phy_type);
7054
7055                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7056                                                SUPPORTED_FIBRE |
7057                                                SUPPORTED_Pause |
7058                                                SUPPORTED_Asym_Pause);
7059                         break;
7060
7061                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7062                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7063                                        ext_phy_type);
7064
7065                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7066                                                SUPPORTED_1000baseT_Full |
7067                                                SUPPORTED_FIBRE |
7068                                                SUPPORTED_Pause |
7069                                                SUPPORTED_Asym_Pause);
7070                         break;
7071
7072                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7073                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7074                                        ext_phy_type);
7075
7076                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7077                                                SUPPORTED_1000baseT_Full |
7078                                                SUPPORTED_FIBRE |
7079                                                SUPPORTED_Autoneg |
7080                                                SUPPORTED_Pause |
7081                                                SUPPORTED_Asym_Pause);
7082                         break;
7083
7084                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7085                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7086                                        ext_phy_type);
7087
7088                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7089                                                SUPPORTED_2500baseX_Full |
7090                                                SUPPORTED_1000baseT_Full |
7091                                                SUPPORTED_FIBRE |
7092                                                SUPPORTED_Autoneg |
7093                                                SUPPORTED_Pause |
7094                                                SUPPORTED_Asym_Pause);
7095                         break;
7096
7097                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7098                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7099                                        ext_phy_type);
7100
7101                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7102                                                SUPPORTED_TP |
7103                                                SUPPORTED_Autoneg |
7104                                                SUPPORTED_Pause |
7105                                                SUPPORTED_Asym_Pause);
7106                         break;
7107
7108                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7109                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7110                                   bp->link_params.ext_phy_config);
7111                         break;
7112
7113                 default:
7114                         BNX2X_ERR("NVRAM config error. "
7115                                   "BAD XGXS ext_phy_config 0x%x\n",
7116                                   bp->link_params.ext_phy_config);
7117                         return;
7118                 }
7119
7120                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7121                                            port*0x18);
7122                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7123
7124                 break;
7125
7126         default:
7127                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7128                           bp->port.link_config);
7129                 return;
7130         }
7131         bp->link_params.phy_addr = bp->port.phy_addr;
7132
7133         /* mask what we support according to speed_cap_mask */
7134         if (!(bp->link_params.speed_cap_mask &
7135                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7136                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7137
7138         if (!(bp->link_params.speed_cap_mask &
7139                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7140                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7141
7142         if (!(bp->link_params.speed_cap_mask &
7143                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7144                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7145
7146         if (!(bp->link_params.speed_cap_mask &
7147                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7148                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7149
7150         if (!(bp->link_params.speed_cap_mask &
7151                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7152                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7153                                         SUPPORTED_1000baseT_Full);
7154
7155         if (!(bp->link_params.speed_cap_mask &
7156                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7157                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7158
7159         if (!(bp->link_params.speed_cap_mask &
7160                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7161                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7162
7163         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7164 }
7165
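/* Translate the NVRAM link_config into req_line_speed/req_duplex and
 * the advertised mask.  Each fixed-speed case is cross-checked against
 * the supported mask built above; an unknown speed setting falls back
 * to autoneg instead of failing the probe.
 */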
7166 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7167 {
7168         bp->link_params.req_duplex = DUPLEX_FULL;
7169
7170         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7171         case PORT_FEATURE_LINK_SPEED_AUTO:
7172                 if (bp->port.supported & SUPPORTED_Autoneg) {
7173                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7174                         bp->port.advertising = bp->port.supported;
7175                 } else {
7176                         u32 ext_phy_type =
7177                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7178
7179                         if ((ext_phy_type ==
7180                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7181                             (ext_phy_type ==
7182                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7183                                 /* force 10G, no AN */
7184                                 bp->link_params.req_line_speed = SPEED_10000;
7185                                 bp->port.advertising =
7186                                                 (ADVERTISED_10000baseT_Full |
7187                                                  ADVERTISED_FIBRE);
7188                                 break;
7189                         }
7190                         BNX2X_ERR("NVRAM config error. "
7191                                   "Invalid link_config 0x%x"
7192                                   "  Autoneg not supported\n",
7193                                   bp->port.link_config);
7194                         return;
7195                 }
7196                 break;
7197
7198         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7199                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7200                         bp->link_params.req_line_speed = SPEED_10;
7201                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7202                                                 ADVERTISED_TP);
7203                 } else {
7204                         BNX2X_ERR("NVRAM config error. "
7205                                   "Invalid link_config 0x%x"
7206                                   "  speed_cap_mask 0x%x\n",
7207                                   bp->port.link_config,
7208                                   bp->link_params.speed_cap_mask);
7209                         return;
7210                 }
7211                 break;
7212
7213         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7214                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7215                         bp->link_params.req_line_speed = SPEED_10;
7216                         bp->link_params.req_duplex = DUPLEX_HALF;
7217                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7218                                                 ADVERTISED_TP);
7219                 } else {
7220                         BNX2X_ERR("NVRAM config error. "
7221                                   "Invalid link_config 0x%x"
7222                                   "  speed_cap_mask 0x%x\n",
7223                                   bp->port.link_config,
7224                                   bp->link_params.speed_cap_mask);
7225                         return;
7226                 }
7227                 break;
7228
7229         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7230                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7231                         bp->link_params.req_line_speed = SPEED_100;
7232                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7233                                                 ADVERTISED_TP);
7234                 } else {
7235                         BNX2X_ERR("NVRAM config error. "
7236                                   "Invalid link_config 0x%x"
7237                                   "  speed_cap_mask 0x%x\n",
7238                                   bp->port.link_config,
7239                                   bp->link_params.speed_cap_mask);
7240                         return;
7241                 }
7242                 break;
7243
7244         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7245                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7246                         bp->link_params.req_line_speed = SPEED_100;
7247                         bp->link_params.req_duplex = DUPLEX_HALF;
7248                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7249                                                 ADVERTISED_TP);
7250                 } else {
7251                         BNX2X_ERR("NVRAM config error. "
7252                                   "Invalid link_config 0x%x"
7253                                   "  speed_cap_mask 0x%x\n",
7254                                   bp->port.link_config,
7255                                   bp->link_params.speed_cap_mask);
7256                         return;
7257                 }
7258                 break;
7259
7260         case PORT_FEATURE_LINK_SPEED_1G:
7261                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7262                         bp->link_params.req_line_speed = SPEED_1000;
7263                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7264                                                 ADVERTISED_TP);
7265                 } else {
7266                         BNX2X_ERR("NVRAM config error. "
7267                                   "Invalid link_config 0x%x"
7268                                   "  speed_cap_mask 0x%x\n",
7269                                   bp->port.link_config,
7270                                   bp->link_params.speed_cap_mask);
7271                         return;
7272                 }
7273                 break;
7274
7275         case PORT_FEATURE_LINK_SPEED_2_5G:
7276                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7277                         bp->link_params.req_line_speed = SPEED_2500;
7278                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7279                                                 ADVERTISED_TP);
7280                 } else {
7281                         BNX2X_ERR("NVRAM config error. "
7282                                   "Invalid link_config 0x%x"
7283                                   "  speed_cap_mask 0x%x\n",
7284                                   bp->port.link_config,
7285                                   bp->link_params.speed_cap_mask);
7286                         return;
7287                 }
7288                 break;
7289
7290         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7291         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7292         case PORT_FEATURE_LINK_SPEED_10G_KR:
7293                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7294                         bp->link_params.req_line_speed = SPEED_10000;
7295                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7296                                                 ADVERTISED_FIBRE);
7297                 } else {
7298                         BNX2X_ERR("NVRAM config error. "
7299                                   "Invalid link_config 0x%x"
7300                                   "  speed_cap_mask 0x%x\n",
7301                                   bp->port.link_config,
7302                                   bp->link_params.speed_cap_mask);
7303                         return;
7304                 }
7305                 break;
7306
7307         default:
7308                 BNX2X_ERR("NVRAM config error. "
7309                           "BAD link speed link_config 0x%x\n",
7310                           bp->port.link_config);
7311                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7312                 bp->port.advertising = bp->port.supported;
7313                 break;
7314         }
7315
7316         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7317                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7318         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7319             !(bp->port.supported & SUPPORTED_Autoneg))
7320                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7321
7322         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7323                        "  advertising 0x%x\n",
7324                        bp->link_params.req_line_speed,
7325                        bp->link_params.req_duplex,
7326                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7327 }
7328
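/* Port link parameters and the MAC address come from shared memory.
 * The MAC is split across two 32-bit words; e.g. for 00:10:18:aa:bb:cc:
 *
 *      mac_upper = 0x00000010  (bytes 0-1 in the low 16 bits)
 *      mac_lower = 0x18aabbcc  (bytes 2-5)
 */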
7329 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7330 {
7331         int port = BP_PORT(bp);
7332         u32 val, val2;
7333
7334         bp->link_params.bp = bp;
7335         bp->link_params.port = port;
7336
7337         bp->link_params.serdes_config =
7338                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7339         bp->link_params.lane_config =
7340                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7341         bp->link_params.ext_phy_config =
7342                 SHMEM_RD(bp,
7343                          dev_info.port_hw_config[port].external_phy_config);
7344         bp->link_params.speed_cap_mask =
7345                 SHMEM_RD(bp,
7346                          dev_info.port_hw_config[port].speed_capability_mask);
7347
7348         bp->port.link_config =
7349                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7350
7351         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7352              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7353                        "  link_config 0x%08x\n",
7354                        bp->link_params.serdes_config,
7355                        bp->link_params.lane_config,
7356                        bp->link_params.ext_phy_config,
7357                        bp->link_params.speed_cap_mask, bp->port.link_config);
7358
7359         bp->link_params.switch_cfg = (bp->port.link_config &
7360                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7361         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7362
7363         bnx2x_link_settings_requested(bp);
7364
7365         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7366         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7367         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7368         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7369         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7370         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7371         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7372         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7373         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7374         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7375 }
7376
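/* On E1H, a valid e1hov tag in mf_cfg selects multi-function mode, in
 * which case the per-function MAC from mf_cfg overrides the port MAC
 * read above.  Without an MCP (emulation/FPGA only) a random MAC is
 * generated instead.
 */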
7377 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7378 {
7379         int func = BP_FUNC(bp);
7380         u32 val, val2;
7381         int rc = 0;
7382
7383         bnx2x_get_common_hwinfo(bp);
7384
7385         bp->e1hov = 0;
7386         bp->e1hmf = 0;
7387         if (CHIP_IS_E1H(bp)) {
7388                 bp->mf_config =
7389                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7390
7391                 val =
7392                    (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7393                     FUNC_MF_CFG_E1HOV_TAG_MASK);
7394                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7395
7396                         bp->e1hov = val;
7397                         bp->e1hmf = 1;
7398                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7399                                        "(0x%04x)\n",
7400                                        func, bp->e1hov, bp->e1hov);
7401                 } else {
7402                         BNX2X_DEV_INFO("Single function mode\n");
7403                         if (BP_E1HVN(bp)) {
7404                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7405                                           "  aborting\n", func);
7406                                 rc = -EPERM;
7407                         }
7408                 }
7409         }
7410
7411         if (!BP_NOMCP(bp)) {
7412                 bnx2x_get_port_hwinfo(bp);
7413
7414                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7415                               DRV_MSG_SEQ_NUMBER_MASK);
7416                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7417         }
7418
7419         if (IS_E1HMF(bp)) {
7420                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7421                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7422                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7423                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7424                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7425                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7426                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7427                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7428                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7429                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7430                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7431                                ETH_ALEN);
7432                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7433                                ETH_ALEN);
7434                 }
7435
7436                 return rc;
7437         }
7438
7439         if (BP_NOMCP(bp)) {
7440                 /* only supposed to happen on emulation/FPGA */
7441                 BNX2X_ERR("warning random MAC workaround active\n");
7442                 random_ether_addr(bp->dev->dev_addr);
7443                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7444         }
7445
7446         return rc;
7447 }
7448
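/* One-time per-device software init.  intr_sem starts at 1 so stray
 * interrupts are ignored until the HW is initialized; the timer uses
 * the init_timer() API of this kernel era.  tx_ticks/rx_ticks are the
 * default interrupt coalescing values (presumably in microseconds, as
 * consumed by the ethtool coalesce hooks).
 */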
7449 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7450 {
7451         int func = BP_FUNC(bp);
7452         int rc;
7453
7454         /* Disable interrupt handling until HW is initialized */
7455         atomic_set(&bp->intr_sem, 1);
7456
7457         mutex_init(&bp->port.phy_mutex);
7458
7459         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7460         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7461
7462         rc = bnx2x_get_hwinfo(bp);
7463
7464         /* need to reset chip if undi was active */
7465         if (!BP_NOMCP(bp))
7466                 bnx2x_undi_unload(bp);
7467
7468         if (CHIP_REV_IS_FPGA(bp))
7469                 printk(KERN_ERR PFX "FPGA detected\n");
7470
7471         if (BP_NOMCP(bp) && (func == 0))
7472                 printk(KERN_ERR PFX
7473                        "MCP disabled, must load devices in order!\n");
7474
7475         /* Set TPA flags */
7476         if (disable_tpa) {
7477                 bp->flags &= ~TPA_ENABLE_FLAG;
7478                 bp->dev->features &= ~NETIF_F_LRO;
7479         } else {
7480                 bp->flags |= TPA_ENABLE_FLAG;
7481                 bp->dev->features |= NETIF_F_LRO;
7482         }
7483
7484
7485         bp->tx_ring_size = MAX_TX_AVAIL;
7486         bp->rx_ring_size = MAX_RX_AVAIL;
7487
7488         bp->rx_csum = 1;
7489         bp->rx_offset = 0;
7490
7491         bp->tx_ticks = 50;
7492         bp->rx_ticks = 25;
7493
7494         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7495         bp->current_interval = (poll ? poll : bp->timer_interval);
7496
7497         init_timer(&bp->timer);
7498         bp->timer.expires = jiffies + bp->current_interval;
7499         bp->timer.data = (unsigned long) bp;
7500         bp->timer.function = bnx2x_timer;
7501
7502         return rc;
7503 }
7504
7505 /*
7506  * ethtool service functions
7507  */
7508
7509 /* All ethtool functions called with rtnl_lock */
7510
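/* In E1H multi-function mode the reported speed is clamped to this
 * function's bandwidth share: max_bw is stored in units of 100 Mbps,
 * so e.g. a max_bw field of 25 caps the reported speed at 2500.
 */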
7511 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7512 {
7513         struct bnx2x *bp = netdev_priv(dev);
7514
7515         cmd->supported = bp->port.supported;
7516         cmd->advertising = bp->port.advertising;
7517
7518         if (netif_carrier_ok(dev)) {
7519                 cmd->speed = bp->link_vars.line_speed;
7520                 cmd->duplex = bp->link_vars.duplex;
7521         } else {
7522                 cmd->speed = bp->link_params.req_line_speed;
7523                 cmd->duplex = bp->link_params.req_duplex;
7524         }
7525         if (IS_E1HMF(bp)) {
7526                 u16 vn_max_rate;
7527
7528                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7529                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7530                 if (vn_max_rate < cmd->speed)
7531                         cmd->speed = vn_max_rate;
7532         }
7533
7534         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7535                 u32 ext_phy_type =
7536                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7537
7538                 switch (ext_phy_type) {
7539                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7540                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7541                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7542                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7543                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7544                         cmd->port = PORT_FIBRE;
7545                         break;
7546
7547                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7548                         cmd->port = PORT_TP;
7549                         break;
7550
7551                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7552                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7553                                   bp->link_params.ext_phy_config);
7554                         break;
7555
7556                 default:
7557                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7558                            bp->link_params.ext_phy_config);
7559                         break;
7560                 }
7561         } else
7562                 cmd->port = PORT_TP;
7563
7564         cmd->phy_address = bp->port.phy_addr;
7565         cmd->transceiver = XCVR_INTERNAL;
7566
7567         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7568                 cmd->autoneg = AUTONEG_ENABLE;
7569         else
7570                 cmd->autoneg = AUTONEG_DISABLE;
7571
7572         cmd->maxtxpkt = 0;
7573         cmd->maxrxpkt = 0;
7574
7575         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7576            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7577            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7578            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7579            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7580            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7581            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7582
7583         return 0;
7584 }
7585
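/* Link settings are silently ignored in multi-function mode (the MCP
 * owns the link there).  For autoneg the user's advertising mask is
 * intersected with the supported mask; a forced speed/duplex pair is
 * validated against the supported mask before bnx2x_link_set() applies
 * it.
 */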
7586 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7587 {
7588         struct bnx2x *bp = netdev_priv(dev);
7589         u32 advertising;
7590
7591         if (IS_E1HMF(bp))
7592                 return 0;
7593
7594         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7595            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7596            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7597            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7598            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7599            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7600            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7601
7602         if (cmd->autoneg == AUTONEG_ENABLE) {
7603                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7604                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7605                         return -EINVAL;
7606                 }
7607
7608                 /* advertise the requested speed and duplex if supported */
7609                 cmd->advertising &= bp->port.supported;
7610
7611                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7612                 bp->link_params.req_duplex = DUPLEX_FULL;
7613                 bp->port.advertising |= (ADVERTISED_Autoneg |
7614                                          cmd->advertising);
7615
7616         } else { /* forced speed */
7617                 /* advertise the requested speed and duplex if supported */
7618                 switch (cmd->speed) {
7619                 case SPEED_10:
7620                         if (cmd->duplex == DUPLEX_FULL) {
7621                                 if (!(bp->port.supported &
7622                                       SUPPORTED_10baseT_Full)) {
7623                                         DP(NETIF_MSG_LINK,
7624                                            "10M full not supported\n");
7625                                         return -EINVAL;
7626                                 }
7627
7628                                 advertising = (ADVERTISED_10baseT_Full |
7629                                                ADVERTISED_TP);
7630                         } else {
7631                                 if (!(bp->port.supported &
7632                                       SUPPORTED_10baseT_Half)) {
7633                                         DP(NETIF_MSG_LINK,
7634                                            "10M half not supported\n");
7635                                         return -EINVAL;
7636                                 }
7637
7638                                 advertising = (ADVERTISED_10baseT_Half |
7639                                                ADVERTISED_TP);
7640                         }
7641                         break;
7642
7643                 case SPEED_100:
7644                         if (cmd->duplex == DUPLEX_FULL) {
7645                                 if (!(bp->port.supported &
7646                                                 SUPPORTED_100baseT_Full)) {
7647                                         DP(NETIF_MSG_LINK,
7648                                            "100M full not supported\n");
7649                                         return -EINVAL;
7650                                 }
7651
7652                                 advertising = (ADVERTISED_100baseT_Full |
7653                                                ADVERTISED_TP);
7654                         } else {
7655                                 if (!(bp->port.supported &
7656                                                 SUPPORTED_100baseT_Half)) {
7657                                         DP(NETIF_MSG_LINK,
7658                                            "100M half not supported\n");
7659                                         return -EINVAL;
7660                                 }
7661
7662                                 advertising = (ADVERTISED_100baseT_Half |
7663                                                ADVERTISED_TP);
7664                         }
7665                         break;
7666
7667                 case SPEED_1000:
7668                         if (cmd->duplex != DUPLEX_FULL) {
7669                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7670                                 return -EINVAL;
7671                         }
7672
7673                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7674                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7675                                 return -EINVAL;
7676                         }
7677
7678                         advertising = (ADVERTISED_1000baseT_Full |
7679                                        ADVERTISED_TP);
7680                         break;
7681
7682                 case SPEED_2500:
7683                         if (cmd->duplex != DUPLEX_FULL) {
7684                                 DP(NETIF_MSG_LINK,
7685                                    "2.5G half not supported\n");
7686                                 return -EINVAL;
7687                         }
7688
7689                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7690                                 DP(NETIF_MSG_LINK,
7691                                    "2.5G full not supported\n");
7692                                 return -EINVAL;
7693                         }
7694
7695                         advertising = (ADVERTISED_2500baseX_Full |
7696                                        ADVERTISED_TP);
7697                         break;
7698
7699                 case SPEED_10000:
7700                         if (cmd->duplex != DUPLEX_FULL) {
7701                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7702                                 return -EINVAL;
7703                         }
7704
7705                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7706                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7707                                 return -EINVAL;
7708                         }
7709
7710                         advertising = (ADVERTISED_10000baseT_Full |
7711                                        ADVERTISED_FIBRE);
7712                         break;
7713
7714                 default:
7715                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7716                         return -EINVAL;
7717                 }
7718
7719                 bp->link_params.req_line_speed = cmd->speed;
7720                 bp->link_params.req_duplex = cmd->duplex;
7721                 bp->port.advertising = advertising;
7722         }
7723
7724         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7725            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7726            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7727            bp->port.advertising);
7728
7729         if (netif_running(dev)) {
7730                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7731                 bnx2x_link_set(bp);
7732         }
7733
7734         return 0;
7735 }
7736
7737 #define PHY_FW_VER_LEN                  10
7738
7739 static void bnx2x_get_drvinfo(struct net_device *dev,
7740                               struct ethtool_drvinfo *info)
7741 {
7742         struct bnx2x *bp = netdev_priv(dev);
7743         char phy_fw_ver[PHY_FW_VER_LEN];
7744
7745         strcpy(info->driver, DRV_MODULE_NAME);
7746         strcpy(info->version, DRV_MODULE_VERSION);
7747
7748         phy_fw_ver[0] = '\0';
7749         if (bp->port.pmf) {
7750                 bnx2x_acquire_phy_lock(bp);
7751                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7752                                              (bp->state != BNX2X_STATE_CLOSED),
7753                                              phy_fw_ver, PHY_FW_VER_LEN);
7754                 bnx2x_release_phy_lock(bp);
7755         }
7756
7757         snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7758                  BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7759                  BCM_5710_FW_REVISION_VERSION,
7760                  BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7761                  ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
7762         strcpy(info->bus_info, pci_name(bp->pdev));
7763         info->n_stats = BNX2X_NUM_STATS;
7764         info->testinfo_len = BNX2X_NUM_TESTS;
7765         info->eedump_len = bp->common.flash_size;
7766         info->regdump_len = 0;
7767 }
7768
7769 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7770 {
7771         struct bnx2x *bp = netdev_priv(dev);
7772
7773         if (bp->flags & NO_WOL_FLAG) {
7774                 wol->supported = 0;
7775                 wol->wolopts = 0;
7776         } else {
7777                 wol->supported = WAKE_MAGIC;
7778                 if (bp->wol)
7779                         wol->wolopts = WAKE_MAGIC;
7780                 else
7781                         wol->wolopts = 0;
7782         }
7783         memset(&wol->sopass, 0, sizeof(wol->sopass));
7784 }
7785
7786 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7787 {
7788         struct bnx2x *bp = netdev_priv(dev);
7789
7790         if (wol->wolopts & ~WAKE_MAGIC)
7791                 return -EINVAL;
7792
7793         if (wol->wolopts & WAKE_MAGIC) {
7794                 if (bp->flags & NO_WOL_FLAG)
7795                         return -EINVAL;
7796
7797                 bp->wol = 1;
7798         } else
7799                 bp->wol = 0;
7800
7801         return 0;
7802 }
7803
7804 static u32 bnx2x_get_msglevel(struct net_device *dev)
7805 {
7806         struct bnx2x *bp = netdev_priv(dev);
7807
7808         return bp->msglevel;
7809 }
7810
7811 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7812 {
7813         struct bnx2x *bp = netdev_priv(dev);
7814
7815         if (capable(CAP_NET_ADMIN))
7816                 bp->msglevel = level;
7817 }
7818
7819 static int bnx2x_nway_reset(struct net_device *dev)
7820 {
7821         struct bnx2x *bp = netdev_priv(dev);
7822
7823         if (!bp->port.pmf)
7824                 return 0;
7825
7826         if (netif_running(dev)) {
7827                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7828                 bnx2x_link_set(bp);
7829         }
7830
7831         return 0;
7832 }
7833
7834 static int bnx2x_get_eeprom_len(struct net_device *dev)
7835 {
7836         struct bnx2x *bp = netdev_priv(dev);
7837
7838         return bp->common.flash_size;
7839 }
7840
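/* NVRAM software arbitration: a port requests its arbiter bit through
 * the SET1 register and polls until the matching ARB1 bit is granted;
 * the release path writes CLR1 and polls for the bit to drop.  Every
 * acquire is paired with a release, e.g.:
 *
 *      rc = bnx2x_acquire_nvram_lock(bp);
 *      if (rc)
 *              return rc;
 *      ... access NVRAM ...
 *      bnx2x_release_nvram_lock(bp);
 */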
7841 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7842 {
7843         int port = BP_PORT(bp);
7844         int count, i;
7845         u32 val = 0;
7846
7847         /* adjust timeout for emulation/FPGA */
7848         count = NVRAM_TIMEOUT_COUNT;
7849         if (CHIP_REV_IS_SLOW(bp))
7850                 count *= 100;
7851
7852         /* request access to nvram interface */
7853         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7854                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7855
7856         for (i = 0; i < count*10; i++) {
7857                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7858                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7859                         break;
7860
7861                 udelay(5);
7862         }
7863
7864         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7865                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7866                 return -EBUSY;
7867         }
7868
7869         return 0;
7870 }
7871
7872 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7873 {
7874         int port = BP_PORT(bp);
7875         int count, i;
7876         u32 val = 0;
7877
7878         /* adjust timeout for emulation/FPGA */
7879         count = NVRAM_TIMEOUT_COUNT;
7880         if (CHIP_REV_IS_SLOW(bp))
7881                 count *= 100;
7882
7883         /* relinquish nvram interface */
7884         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7885                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7886
7887         for (i = 0; i < count*10; i++) {
7888                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7889                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7890                         break;
7891
7892                 udelay(5);
7893         }
7894
7895         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7896                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7897                 return -EBUSY;
7898         }
7899
7900         return 0;
7901 }
7902
7903 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7904 {
7905         u32 val;
7906
7907         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7908
7909         /* enable both bits, even on read */
7910         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7911                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7912                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7913 }
7914
7915 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7916 {
7917         u32 val;
7918
7919         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7920
7921         /* disable both bits, even after read */
7922         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7923                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7924                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7925 }
7926
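/* Program the address, issue a DOIT command and poll for DONE.  The
 * data word is converted with cpu_to_be32() because ethtool treats the
 * EEPROM contents as a flat byte array.
 */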
7927 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7928                                   u32 cmd_flags)
7929 {
7930         int count, i, rc;
7931         u32 val;
7932
7933         /* build the command word */
7934         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7935
7936         /* need to clear DONE bit separately */
7937         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7938
7939         /* address of the NVRAM to read from */
7940         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7941                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7942
7943         /* issue a read command */
7944         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7945
7946         /* adjust timeout for emulation/FPGA */
7947         count = NVRAM_TIMEOUT_COUNT;
7948         if (CHIP_REV_IS_SLOW(bp))
7949                 count *= 100;
7950
7951         /* wait for completion */
7952         *ret_val = 0;
7953         rc = -EBUSY;
7954         for (i = 0; i < count; i++) {
7955                 udelay(5);
7956                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7957
7958                 if (val & MCPR_NVM_COMMAND_DONE) {
7959                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7960                         /* we read nvram data in cpu order
7961                          * but ethtool sees it as an array of bytes
7962                          * converting to big-endian will do the work */
7963                         val = cpu_to_be32(val);
7964                         *ret_val = val;
7965                         rc = 0;
7966                         break;
7967                 }
7968         }
7969
7970         return rc;
7971 }
7972
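/* Dword-aligned burst read: the FIRST flag is set only on the initial
 * dword and LAST only on the final one, framing the whole burst as a
 * single NVRAM transaction.  Both offset and buf_size must be 4-byte
 * aligned.
 */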
7973 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7974                             int buf_size)
7975 {
7976         int rc;
7977         u32 cmd_flags;
7978         u32 val;
7979
7980         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7981                 DP(BNX2X_MSG_NVM,
7982                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
7983                    offset, buf_size);
7984                 return -EINVAL;
7985         }
7986
7987         if (offset + buf_size > bp->common.flash_size) {
7988                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7989                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7990                    offset, buf_size, bp->common.flash_size);
7991                 return -EINVAL;
7992         }
7993
7994         /* request access to nvram interface */
7995         rc = bnx2x_acquire_nvram_lock(bp);
7996         if (rc)
7997                 return rc;
7998
7999         /* enable access to nvram interface */
8000         bnx2x_enable_nvram_access(bp);
8001
8002         /* read the first word(s) */
8003         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8004         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8005                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8006                 memcpy(ret_buf, &val, 4);
8007
8008                 /* advance to the next dword */
8009                 offset += sizeof(u32);
8010                 ret_buf += sizeof(u32);
8011                 buf_size -= sizeof(u32);
8012                 cmd_flags = 0;
8013         }
8014
8015         if (rc == 0) {
8016                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8017                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8018                 memcpy(ret_buf, &val, 4);
8019         }
8020
8021         /* disable access to nvram interface */
8022         bnx2x_disable_nvram_access(bp);
8023         bnx2x_release_nvram_lock(bp);
8024
8025         return rc;
8026 }
8027
8028 static int bnx2x_get_eeprom(struct net_device *dev,
8029                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8030 {
8031         struct bnx2x *bp = netdev_priv(dev);
8032         int rc;
8033
8034         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8035            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8036            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8037            eeprom->len, eeprom->len);
8038
8039         /* parameters already validated in ethtool_get_eeprom */
8040
8041         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8042
8043         return rc;
8044 }
8045
8046 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8047                                    u32 cmd_flags)
8048 {
8049         int count, i, rc;
8050
8051         /* build the command word */
8052         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8053
8054         /* need to clear DONE bit separately */
8055         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8056
8057         /* write the data */
8058         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8059
8060         /* address of the NVRAM to write to */
8061         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8062                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8063
8064         /* issue the write command */
8065         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8066
8067         /* adjust timeout for emulation/FPGA */
8068         count = NVRAM_TIMEOUT_COUNT;
8069         if (CHIP_REV_IS_SLOW(bp))
8070                 count *= 100;
8071
8072         /* wait for completion */
8073         rc = -EBUSY;
8074         for (i = 0; i < count; i++) {
8075                 udelay(5);
8076                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8077                 if (val & MCPR_NVM_COMMAND_DONE) {
8078                         rc = 0;
8079                         break;
8080                 }
8081         }
8082
8083         return rc;
8084 }
8085
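/* bit shift of a byte within its naturally aligned dword */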
8086 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8087
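/* Write a single byte to NVRAM as a read-modify-write of the
 * containing dword: read the aligned dword, patch the target byte,
 * and write the dword back.  The read returns big-endian data, hence
 * the be32_to_cpu() before writing.
 */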
8088 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8089                               int buf_size)
8090 {
8091         int rc;
8092         u32 cmd_flags;
8093         u32 align_offset;
8094         u32 val;
8095
8096         if (offset + buf_size > bp->common.flash_size) {
8097                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8098                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8099                    offset, buf_size, bp->common.flash_size);
8100                 return -EINVAL;
8101         }
8102
8103         /* request access to nvram interface */
8104         rc = bnx2x_acquire_nvram_lock(bp);
8105         if (rc)
8106                 return rc;
8107
8108         /* enable access to nvram interface */
8109         bnx2x_enable_nvram_access(bp);
8110
8111         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8112         align_offset = (offset & ~0x03);
8113         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8114
8115         if (rc == 0) {
8116                 val &= ~(0xff << BYTE_OFFSET(offset));
8117                 val |= (*data_buf << BYTE_OFFSET(offset));
8118
8119                 /* nvram data is returned as an array of bytes
8120                  * convert it back to cpu order */
8121                 val = be32_to_cpu(val);
8122
8123                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8124                                              cmd_flags);
8125         }
8126
8127         /* disable access to nvram interface */
8128         bnx2x_disable_nvram_access(bp);
8129         bnx2x_release_nvram_lock(bp);
8130
8131         return rc;
8132 }
8133
8134 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8135                              int buf_size)
8136 {
8137         int rc;
8138         u32 cmd_flags;
8139         u32 val;
8140         u32 written_so_far;
8141
8142         if (buf_size == 1)      /* ethtool */
8143                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8144
8145         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8146                 DP(BNX2X_MSG_NVM,
8147                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8148                    offset, buf_size);
8149                 return -EINVAL;
8150         }
8151
8152         if (offset + buf_size > bp->common.flash_size) {
8153                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8154                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8155                    offset, buf_size, bp->common.flash_size);
8156                 return -EINVAL;
8157         }
8158
8159         /* request access to nvram interface */
8160         rc = bnx2x_acquire_nvram_lock(bp);
8161         if (rc)
8162                 return rc;
8163
8164         /* enable access to nvram interface */
8165         bnx2x_enable_nvram_access(bp);
8166
8167         written_so_far = 0;
8168         cmd_flags = MCPR_NVM_COMMAND_FIRST;
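        /* Each NVRAM page is bracketed by its own FIRST/LAST pair:
         * LAST is raised on the final dword of the buffer or of a
         * page, FIRST on the first dword of the following page.
         */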
8169         while ((written_so_far < buf_size) && (rc == 0)) {
8170                 if (written_so_far == (buf_size - sizeof(u32)))
8171                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8172                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8173                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8174                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8175                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8176
8177                 memcpy(&val, data_buf, 4);
8178
8179                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8180
8181                 /* advance to the next dword */
8182                 offset += sizeof(u32);
8183                 data_buf += sizeof(u32);
8184                 written_so_far += sizeof(u32);
8185                 cmd_flags = 0;
8186         }
8187
8188         /* disable access to nvram interface */
8189         bnx2x_disable_nvram_access(bp);
8190         bnx2x_release_nvram_lock(bp);
8191
8192         return rc;
8193 }
8194
8195 static int bnx2x_set_eeprom(struct net_device *dev,
8196                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8197 {
8198         struct bnx2x *bp = netdev_priv(dev);
8199         int rc;
8200
8201         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8202            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8203            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8204            eeprom->len, eeprom->len);
8205
8206         /* parameters already validated in ethtool_set_eeprom */
8207
8208         /* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
8209         if (eeprom->magic == 0x00504859)
8210                 if (bp->port.pmf) {
8211
8212                         bnx2x_acquire_phy_lock(bp);
8213                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8214                                              bp->link_params.ext_phy_config,
8215                                              (bp->state != BNX2X_STATE_CLOSED),
8216                                              eebuf, eeprom->len);
8217                         if ((bp->state == BNX2X_STATE_OPEN) ||
8218                             (bp->state == BNX2X_STATE_DISABLED)) {
8219                                 rc |= bnx2x_link_reset(&bp->link_params,
8220                                                        &bp->link_vars);
8221                                 rc |= bnx2x_phy_init(&bp->link_params,
8222                                                      &bp->link_vars);
8223                         }
8224                         bnx2x_release_phy_lock(bp);
8225
8226                 } else /* Only the PMF can access the PHY */
8227                         return -EINVAL;
8228         else
8229                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8230
8231         return rc;
8232 }
8233
8234 static int bnx2x_get_coalesce(struct net_device *dev,
8235                               struct ethtool_coalesce *coal)
8236 {
8237         struct bnx2x *bp = netdev_priv(dev);
8238
8239         memset(coal, 0, sizeof(struct ethtool_coalesce));
8240
8241         coal->rx_coalesce_usecs = bp->rx_ticks;
8242         coal->tx_coalesce_usecs = bp->tx_ticks;
8243
8244         return 0;
8245 }
8246
8247 static int bnx2x_set_coalesce(struct net_device *dev,
8248                               struct ethtool_coalesce *coal)
8249 {
8250         struct bnx2x *bp = netdev_priv(dev);
8251
8252         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8253         if (bp->rx_ticks > 0x3000)      /* same 0x3000 cap as tx below */
8254                 bp->rx_ticks = 0x3000;
8255
8256         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8257         if (bp->tx_ticks > 0x3000)
8258                 bp->tx_ticks = 0x3000;
8259
8260         if (netif_running(dev))
8261                 bnx2x_update_coalesce(bp);
8262
8263         return 0;
8264 }
8265
8266 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8267 {
8268         struct bnx2x *bp = netdev_priv(dev);
8269         int changed = 0;
8270         int rc = 0;
8271
8272         if (data & ETH_FLAG_LRO) {
8273                 if (!(dev->features & NETIF_F_LRO)) {
8274                         dev->features |= NETIF_F_LRO;
8275                         bp->flags |= TPA_ENABLE_FLAG;
8276                         changed = 1;
8277                 }
8278
8279         } else if (dev->features & NETIF_F_LRO) {
8280                 dev->features &= ~NETIF_F_LRO;
8281                 bp->flags &= ~TPA_ENABLE_FLAG;
8282                 changed = 1;
8283         }
8284
8285         if (changed && netif_running(dev)) {
8286                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8287                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8288         }
8289
8290         return rc;
8291 }
8292
8293 static void bnx2x_get_ringparam(struct net_device *dev,
8294                                 struct ethtool_ringparam *ering)
8295 {
8296         struct bnx2x *bp = netdev_priv(dev);
8297
8298         ering->rx_max_pending = MAX_RX_AVAIL;
8299         ering->rx_mini_max_pending = 0;
8300         ering->rx_jumbo_max_pending = 0;
8301
8302         ering->rx_pending = bp->rx_ring_size;
8303         ering->rx_mini_pending = 0;
8304         ering->rx_jumbo_pending = 0;
8305
8306         ering->tx_max_pending = MAX_TX_AVAIL;
8307         ering->tx_pending = bp->tx_ring_size;
8308 }
8309
8310 static int bnx2x_set_ringparam(struct net_device *dev,
8311                                struct ethtool_ringparam *ering)
8312 {
8313         struct bnx2x *bp = netdev_priv(dev);
8314         int rc = 0;
8315
8316         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8317             (ering->tx_pending > MAX_TX_AVAIL) ||
8318             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8319                 return -EINVAL;
8320
8321         bp->rx_ring_size = ering->rx_pending;
8322         bp->tx_ring_size = ering->tx_pending;
8323
8324         if (netif_running(dev)) {
8325                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8326                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8327         }
8328
8329         return rc;
8330 }
8331
8332 static void bnx2x_get_pauseparam(struct net_device *dev,
8333                                  struct ethtool_pauseparam *epause)
8334 {
8335         struct bnx2x *bp = netdev_priv(dev);
8336
8337         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8338                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8339
8340         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8341                             FLOW_CTRL_RX);
8342         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8343                             FLOW_CTRL_TX);
8344
8345         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8346            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8347            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8348 }
8349
8350 static int bnx2x_set_pauseparam(struct net_device *dev,
8351                                 struct ethtool_pauseparam *epause)
8352 {
8353         struct bnx2x *bp = netdev_priv(dev);
8354
8355         if (IS_E1HMF(bp))
8356                 return 0;
8357
8358         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8359            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8360            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8361
8362         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8363
8364         if (epause->rx_pause)
8365                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8366
8367         if (epause->tx_pause)
8368                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8369
8370         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8371                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8372
8373         if (epause->autoneg) {
8374                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8375                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8376                         return -EINVAL;
8377                 }
8378
8379                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8380                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8381         }
8382
8383         DP(NETIF_MSG_LINK,
8384            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8385
8386         if (netif_running(dev)) {
8387                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8388                 bnx2x_link_set(bp);
8389         }
8390
8391         return 0;
8392 }
8393
8394 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8395 {
8396         struct bnx2x *bp = netdev_priv(dev);
8397
8398         return bp->rx_csum;
8399 }
8400
8401 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8402 {
8403         struct bnx2x *bp = netdev_priv(dev);
8404
8405         bp->rx_csum = data;
8406         return 0;
8407 }
8408
8409 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8410 {
8411         if (data) {
8412                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8413                 dev->features |= NETIF_F_TSO6;
8414         } else {
8415                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8416                 dev->features &= ~NETIF_F_TSO6;
8417         }
8418
8419         return 0;
8420 }
8421
8422 static const struct {
8423         char string[ETH_GSTRING_LEN];
8424 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8425         { "register_test (offline)" },
8426         { "memory_test (offline)" },
8427         { "loopback_test (offline)" },
8428         { "nvram_test (online)" },
8429         { "interrupt_test (online)" },
8430         { "link_test (online)" },
8431         { "idle check (online)" },
8432         { "MC errors (online)" }
8433 };
8434
8435 static int bnx2x_self_test_count(struct net_device *dev)
8436 {
8437         return BNX2X_NUM_TESTS;
8438 }
8439
8440 static int bnx2x_test_registers(struct bnx2x *bp)
8441 {
8442         int idx, i, rc = -ENODEV;
8443         u32 wr_val = 0;
8444         int port = BP_PORT(bp);
8445         static const struct {
8446                 u32  offset0;
8447                 u32  offset1;
8448                 u32  mask;
8449         } reg_tbl[] = {
8450 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8451                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8452                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8453                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8454                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8455                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8456                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8457                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8458                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8459                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8460 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8461                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8462                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8463                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8464                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8465                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8466                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8467                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8468                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8469                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8470 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8471                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8472                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8473                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8474                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8475                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8476                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8477                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8478                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8479                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8480 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8481                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8482                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8483                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8484                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8485                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8486                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8487                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8488
8489                 { 0xffffffff, 0, 0x00000000 }
8490         };
8491
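        /* Walk reg_tbl: write a test pattern to each register, read it
         * back through the mask of writable bits, and restore the
         * original value; offset1 is the per-port stride added for
         * port 1.
         */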
8492         if (!netif_running(bp->dev))
8493                 return rc;
8494
8495         /* Repeat the test twice:
8496            First by writing 0x00000000, second by writing 0xffffffff */
8497         for (idx = 0; idx < 2; idx++) {
8498
8499                 switch (idx) {
8500                 case 0:
8501                         wr_val = 0;
8502                         break;
8503                 case 1:
8504                         wr_val = 0xffffffff;
8505                         break;
8506                 }
8507
8508                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8509                         u32 offset, mask, save_val, val;
8510
8511                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8512                         mask = reg_tbl[i].mask;
8513
8514                         save_val = REG_RD(bp, offset);
8515
8516                         REG_WR(bp, offset, wr_val);
8517                         val = REG_RD(bp, offset);
8518
8519                         /* Restore the original register's value */
8520                         REG_WR(bp, offset, save_val);
8521
8522                         /* verify that the value read back is as expected */
8523                         if ((val & mask) != (wr_val & mask))
8524                                 goto test_reg_exit;
8525                 }
8526         }
8527
8528         rc = 0;
8529
8530 test_reg_exit:
8531         return rc;
8532 }
8533
8534 static int bnx2x_test_memory(struct bnx2x *bp)
8535 {
8536         int i, j, rc = -ENODEV;
8537         u32 val;
8538         static const struct {
8539                 u32 offset;
8540                 int size;
8541         } mem_tbl[] = {
8542                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8543                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8544                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8545                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8546                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8547                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8548                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8549
8550                 { 0xffffffff, 0 }
8551         };
8552         static const struct {
8553                 char *name;
8554                 u32 offset;
8555                 u32 e1_mask;
8556                 u32 e1h_mask;
8557         } prty_tbl[] = {
8558                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8559                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8560                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8561                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8562                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8563                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8564
8565                 { NULL, 0xffffffff, 0, 0 }
8566         };
8567
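        /* Sweep every word of each memory so that any latent parity
         * error is latched, then check the parity status registers
         * against the per-chip (E1/E1H) don't-care masks.
         */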
8568         if (!netif_running(bp->dev))
8569                 return rc;
8570
8571         /* Go through all the memories */
8572         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8573                 for (j = 0; j < mem_tbl[i].size; j++)
8574                         REG_RD(bp, mem_tbl[i].offset + j*4);
8575
8576         /* Check the parity status */
8577         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8578                 val = REG_RD(bp, prty_tbl[i].offset);
8579                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8580                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8581                         DP(NETIF_MSG_HW,
8582                            "%s is 0x%x\n", prty_tbl[i].name, val);
8583                         goto test_mem_exit;
8584                 }
8585         }
8586
8587         rc = 0;
8588
8589 test_mem_exit:
8590         return rc;
8591 }
8592
8593 static void bnx2x_netif_start(struct bnx2x *bp)
8594 {
8595         int i;
8596
8597         if (atomic_dec_and_test(&bp->intr_sem)) {
8598                 if (netif_running(bp->dev)) {
8599                         bnx2x_int_enable(bp);
8600                         for_each_queue(bp, i)
8601                                 napi_enable(&bnx2x_fp(bp, i, napi));
8602                         if (bp->state == BNX2X_STATE_OPEN)
8603                                 netif_wake_queue(bp->dev);
8604                 }
8605         }
8606 }
8607
8608 static void bnx2x_netif_stop(struct bnx2x *bp)
8609 {
8610         int i;
8611
8612         if (netif_running(bp->dev)) {
8613                 netif_tx_disable(bp->dev);
8614                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8615                 for_each_queue(bp, i)
8616                         napi_disable(&bnx2x_fp(bp, i, napi));
8617         }
8618         bnx2x_int_disable_sync(bp);
8619 }
8620
8621 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8622 {
8623         int cnt = 1000;
8624
8625         if (link_up)
8626                 while (bnx2x_link_test(bp) && cnt--)
8627                         msleep(10);
8628 }
8629
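/* Send one self-addressed 1514-byte frame through the selected
 * loopback (internal BMAC, or XGXS with the external link restored)
 * and verify that exactly one packet was consumed on the Tx ring, one
 * completion arrived on the Rx ring, and the payload pattern survived
 * the round trip.
 */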
8630 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8631 {
8632         unsigned int pkt_size, num_pkts, i;
8633         struct sk_buff *skb;
8634         unsigned char *packet;
8635         struct bnx2x_fastpath *fp = &bp->fp[0];
8636         u16 tx_start_idx, tx_idx;
8637         u16 rx_start_idx, rx_idx;
8638         u16 pkt_prod;
8639         struct sw_tx_bd *tx_buf;
8640         struct eth_tx_bd *tx_bd;
8641         dma_addr_t mapping;
8642         union eth_rx_cqe *cqe;
8643         u8 cqe_fp_flags;
8644         struct sw_rx_bd *rx_buf;
8645         u16 len;
8646         int rc = -ENODEV;
8647
8648         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8649                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8650                 bnx2x_acquire_phy_lock(bp);
8651                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8652                 bnx2x_release_phy_lock(bp);
8653
8654         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8655                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8656                 bnx2x_acquire_phy_lock(bp);
8657                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8658                 bnx2x_release_phy_lock(bp);
8659                 /* wait until link state is restored */
8660                 bnx2x_wait_for_link(bp, link_up);
8661
8662         } else
8663                 return -EINVAL;
8664
8665         pkt_size = 1514;
8666         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8667         if (!skb) {
8668                 rc = -ENOMEM;
8669                 goto test_loopback_exit;
8670         }
8671         packet = skb_put(skb, pkt_size);
8672         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8673         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8674         for (i = ETH_HLEN; i < pkt_size; i++)
8675                 packet[i] = (unsigned char) (i & 0xff);
8676
8677         num_pkts = 0;
8678         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8679         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8680
8681         pkt_prod = fp->tx_pkt_prod++;
8682         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8683         tx_buf->first_bd = fp->tx_bd_prod;
8684         tx_buf->skb = skb;
8685
8686         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8687         mapping = pci_map_single(bp->pdev, skb->data,
8688                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8689         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8690         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8691         tx_bd->nbd = cpu_to_le16(1);
8692         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8693         tx_bd->vlan = cpu_to_le16(pkt_prod);
8694         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8695                                        ETH_TX_BD_FLAGS_END_BD);
8696         tx_bd->general_data = ((UNICAST_ADDRESS <<
8697                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8698
8699         fp->hw_tx_prods->bds_prod =
8700                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8701         mb(); /* FW restriction: must not reorder writing nbd and packets */
8702         fp->hw_tx_prods->packets_prod =
8703                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8704         DOORBELL(bp, FP_IDX(fp), 0);
8705
8706         mmiowb();
8707
8708         num_pkts++;
8709         fp->tx_bd_prod++;
8710         bp->dev->trans_start = jiffies;
8711
8712         udelay(100);
8713
8714         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8715         if (tx_idx != tx_start_idx + num_pkts)
8716                 goto test_loopback_exit;
8717
8718         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8719         if (rx_idx != rx_start_idx + num_pkts)
8720                 goto test_loopback_exit;
8721
8722         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8723         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8724         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8725                 goto test_loopback_rx_exit;
8726
8727         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8728         if (len != pkt_size)
8729                 goto test_loopback_rx_exit;
8730
8731         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8732         skb = rx_buf->skb;
8733         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8734         for (i = ETH_HLEN; i < pkt_size; i++)
8735                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8736                         goto test_loopback_rx_exit;
8737
8738         rc = 0;
8739
8740 test_loopback_rx_exit:
8741         bp->dev->last_rx = jiffies;
8742
8743         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8744         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8745         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8746         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8747
8748         /* Update producers */
8749         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8750                              fp->rx_sge_prod);
8751         mmiowb(); /* keep prod updates ordered */
8752
8753 test_loopback_exit:
8754         bp->link_params.loopback_mode = LOOPBACK_NONE;
8755
8756         return rc;
8757 }
8758
8759 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8760 {
8761         int rc = 0;
8762
8763         if (!netif_running(bp->dev))
8764                 return BNX2X_LOOPBACK_FAILED;
8765
8766         bnx2x_netif_stop(bp);
8767
8768         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8769                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8770                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8771         }
8772
8773         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8774                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8775                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8776         }
8777
8778         bnx2x_netif_start(bp);
8779
8780         return rc;
8781 }
8782
8783 #define CRC32_RESIDUAL                  0xdebb20e3
8784
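/* Each NVRAM section stores a trailing CRC32; running ether_crc_le()
 * over a section including its CRC yields this well-known fixed
 * residual when the data is intact.
 */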
8785 static int bnx2x_test_nvram(struct bnx2x *bp)
8786 {
8787         static const struct {
8788                 int offset;
8789                 int size;
8790         } nvram_tbl[] = {
8791                 {     0,  0x14 }, /* bootstrap */
8792                 {  0x14,  0xec }, /* dir */
8793                 { 0x100, 0x350 }, /* manuf_info */
8794                 { 0x450,  0xf0 }, /* feature_info */
8795                 { 0x640,  0x64 }, /* upgrade_key_info */
8796                 { 0x6a4,  0x64 },
8797                 { 0x708,  0x70 }, /* manuf_key_info */
8798                 { 0x778,  0x70 },
8799                 {     0,     0 }
8800         };
8801         u32 buf[0x350 / 4];
8802         u8 *data = (u8 *)buf;
8803         int i, rc;
8804         u32 magic, csum;
8805
8806         rc = bnx2x_nvram_read(bp, 0, data, 4);
8807         if (rc) {
8808                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8809                 goto test_nvram_exit;
8810         }
8811
8812         magic = be32_to_cpu(buf[0]);
8813         if (magic != 0x669955aa) {
8814                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8815                 rc = -ENODEV;
8816                 goto test_nvram_exit;
8817         }
8818
8819         for (i = 0; nvram_tbl[i].size; i++) {
8820
8821                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8822                                       nvram_tbl[i].size);
8823                 if (rc) {
8824                         DP(NETIF_MSG_PROBE,
8825                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8826                         goto test_nvram_exit;
8827                 }
8828
8829                 csum = ether_crc_le(nvram_tbl[i].size, data);
8830                 if (csum != CRC32_RESIDUAL) {
8831                         DP(NETIF_MSG_PROBE,
8832                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8833                         rc = -ENODEV;
8834                         goto test_nvram_exit;
8835                 }
8836         }
8837
8838 test_nvram_exit:
8839         return rc;
8840 }
8841
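/* Interrupt test: post a harmless zero-length SET_MAC ramrod on the
 * slowpath queue and wait up to ~100ms for its completion event,
 * which is delivered through the interrupt path being tested.
 */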
8842 static int bnx2x_test_intr(struct bnx2x *bp)
8843 {
8844         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8845         int i, rc;
8846
8847         if (!netif_running(bp->dev))
8848                 return -ENODEV;
8849
8850         config->hdr.length_6b = 0;
8851         config->hdr.offset = 0;
8852         config->hdr.client_id = BP_CL_ID(bp);
8853         config->hdr.reserved1 = 0;
8854
8855         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8856                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8857                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8858         if (rc == 0) {
8859                 bp->set_mac_pending++;
8860                 for (i = 0; i < 10; i++) {
8861                         if (!bp->set_mac_pending)
8862                                 break;
8863                         msleep_interruptible(10);
8864                 }
8865                 if (i == 10)
8866                         rc = -ENODEV;
8867         }
8868
8869         return rc;
8870 }
8871
8872 static void bnx2x_self_test(struct net_device *dev,
8873                             struct ethtool_test *etest, u64 *buf)
8874 {
8875         struct bnx2x *bp = netdev_priv(dev);
8876
8877         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
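        /* buf[] slots follow bnx2x_tests_str_arr: 0 registers,
         * 1 memory, 2 loopback, 3 nvram, 4 interrupt, 5 link,
         * 6 idle check (not implemented here, stays 0), 7 MC assert
         */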
8878
8879         if (!netif_running(dev))
8880                 return;
8881
8882         /* offline tests are not supported in MF mode */
8883         if (IS_E1HMF(bp))
8884                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8885
8886         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8887                 u8 link_up;
8888
8889                 link_up = bp->link_vars.link_up;
8890                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8891                 bnx2x_nic_load(bp, LOAD_DIAG);
8892                 /* wait until link state is restored */
8893                 bnx2x_wait_for_link(bp, link_up);
8894
8895                 if (bnx2x_test_registers(bp) != 0) {
8896                         buf[0] = 1;
8897                         etest->flags |= ETH_TEST_FL_FAILED;
8898                 }
8899                 if (bnx2x_test_memory(bp) != 0) {
8900                         buf[1] = 1;
8901                         etest->flags |= ETH_TEST_FL_FAILED;
8902                 }
8903                 buf[2] = bnx2x_test_loopback(bp, link_up);
8904                 if (buf[2] != 0)
8905                         etest->flags |= ETH_TEST_FL_FAILED;
8906
8907                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8908                 bnx2x_nic_load(bp, LOAD_NORMAL);
8909                 /* wait until link state is restored */
8910                 bnx2x_wait_for_link(bp, link_up);
8911         }
8912         if (bnx2x_test_nvram(bp) != 0) {
8913                 buf[3] = 1;
8914                 etest->flags |= ETH_TEST_FL_FAILED;
8915         }
8916         if (bnx2x_test_intr(bp) != 0) {
8917                 buf[4] = 1;
8918                 etest->flags |= ETH_TEST_FL_FAILED;
8919         }
8920         if (bp->port.pmf)
8921                 if (bnx2x_link_test(bp) != 0) {
8922                         buf[5] = 1;
8923                         etest->flags |= ETH_TEST_FL_FAILED;
8924                 }
8925         buf[7] = bnx2x_mc_assert(bp);
8926         if (buf[7] != 0)
8927                 etest->flags |= ETH_TEST_FL_FAILED;
8928
8929 #ifdef BNX2X_EXTRA_DEBUG
8930         bnx2x_panic_dump(bp);
8931 #endif
8932 }
8933
8934 static const struct {
8935         long offset;
8936         int size;
8937         u32 flags;
8938 #define STATS_FLAGS_PORT                1
8939 #define STATS_FLAGS_FUNC                2
8940         u8 string[ETH_GSTRING_LEN];
8941 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8942 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8943                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8944         { STATS_OFFSET32(error_bytes_received_hi),
8945                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8946         { STATS_OFFSET32(total_bytes_transmitted_hi),
8947                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8948         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8949                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8950         { STATS_OFFSET32(total_unicast_packets_received_hi),
8951                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8952         { STATS_OFFSET32(total_multicast_packets_received_hi),
8953                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8954         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8955                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8956         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8957                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8958         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8959                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8960 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8961                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8962         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8963                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8964         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8965                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8966         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8967                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8968         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8969                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8970         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8971                                 8, STATS_FLAGS_PORT, "tx_deferred" },
8972         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8973                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8974         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8975                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8976         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8977                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8978         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8979                                 8, STATS_FLAGS_PORT, "rx_fragments" },
8980 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8981                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
8982         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8983                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8984         { STATS_OFFSET32(jabber_packets_received),
8985                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8986         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8987                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8988         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8989                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8990         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8991                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8992         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8993                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8994         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8995                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8996         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8997                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8998         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8999                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9000 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9001                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9002         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9003                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9004         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9005                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9006         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9007                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9008         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9009                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9010         { STATS_OFFSET32(mac_filter_discard),
9011                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9012         { STATS_OFFSET32(no_buff_discard),
9013                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9014         { STATS_OFFSET32(xxoverflow_discard),
9015                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9016         { STATS_OFFSET32(brb_drop_hi),
9017                                 8, STATS_FLAGS_PORT, "brb_discard" },
9018         { STATS_OFFSET32(brb_truncate_hi),
9019                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9020 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9021                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9022         { STATS_OFFSET32(rx_skb_alloc_failed),
9023                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9024 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9025                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9026 };
9027
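/* in E1H multi-function mode the port is shared between functions, so
 * per-port MAC statistics are not reported for a single function */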
9028 #define IS_NOT_E1HMF_STAT(bp, i) \
9029                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9030
9031 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9032 {
9033         struct bnx2x *bp = netdev_priv(dev);
9034         int i, j;
9035
9036         switch (stringset) {
9037         case ETH_SS_STATS:
9038                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9039                         if (IS_NOT_E1HMF_STAT(bp, i))
9040                                 continue;
9041                         strcpy(buf + j*ETH_GSTRING_LEN,
9042                                bnx2x_stats_arr[i].string);
9043                         j++;
9044                 }
9045                 break;
9046
9047         case ETH_SS_TEST:
9048                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9049                 break;
9050         }
9051 }
9052
9053 static int bnx2x_get_stats_count(struct net_device *dev)
9054 {
9055         struct bnx2x *bp = netdev_priv(dev);
9056         int i, num_stats = 0;
9057
9058         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9059                 if (IS_NOT_E1HMF_STAT(bp, i))
9060                         continue;
9061                 num_stats++;
9062         }
9063         return num_stats;
9064 }
9065
9066 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9067                                     struct ethtool_stats *stats, u64 *buf)
9068 {
9069         struct bnx2x *bp = netdev_priv(dev);
9070         u32 *hw_stats = (u32 *)&bp->eth_stats;
9071         int i, j;
9072
9073         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9074                 if (IS_NOT_E1HMF_STAT(bp, i))
9075                         continue;
9076
9077                 if (bnx2x_stats_arr[i].size == 0) {
9078                         /* skip this counter */
9079                         buf[j] = 0;
9080                         j++;
9081                         continue;
9082                 }
9083                 if (bnx2x_stats_arr[i].size == 4) {
9084                         /* 4-byte counter */
9085                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9086                         j++;
9087                         continue;
9088                 }
9089                 /* 8-byte counter */
9090                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9091                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9092                 j++;
9093         }
9094 }
9095
9096 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9097 {
9098         struct bnx2x *bp = netdev_priv(dev);
9099         int port = BP_PORT(bp);
9100         int i;
9101
9102         if (!netif_running(dev))
9103                 return 0;
9104
9105         if (!bp->port.pmf)
9106                 return 0;
9107
9108         if (data == 0)
9109                 data = 2;
9110
9111         for (i = 0; i < (data * 2); i++) {
9112                 if ((i % 2) == 0)
9113                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9114                                       bp->link_params.hw_led_mode,
9115                                       bp->link_params.chip_id);
9116                 else
9117                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9118                                       bp->link_params.hw_led_mode,
9119                                       bp->link_params.chip_id);
9120
9121                 msleep_interruptible(500);
9122                 if (signal_pending(current))
9123                         break;
9124         }
9125
9126         if (bp->link_vars.link_up)
9127                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9128                               bp->link_vars.line_speed,
9129                               bp->link_params.hw_led_mode,
9130                               bp->link_params.chip_id);
9131
9132         return 0;
9133 }
9134
9135 static struct ethtool_ops bnx2x_ethtool_ops = {
9136         .get_settings           = bnx2x_get_settings,
9137         .set_settings           = bnx2x_set_settings,
9138         .get_drvinfo            = bnx2x_get_drvinfo,
9139         .get_wol                = bnx2x_get_wol,
9140         .set_wol                = bnx2x_set_wol,
9141         .get_msglevel           = bnx2x_get_msglevel,
9142         .set_msglevel           = bnx2x_set_msglevel,
9143         .nway_reset             = bnx2x_nway_reset,
9144         .get_link               = ethtool_op_get_link,
9145         .get_eeprom_len         = bnx2x_get_eeprom_len,
9146         .get_eeprom             = bnx2x_get_eeprom,
9147         .set_eeprom             = bnx2x_set_eeprom,
9148         .get_coalesce           = bnx2x_get_coalesce,
9149         .set_coalesce           = bnx2x_set_coalesce,
9150         .get_ringparam          = bnx2x_get_ringparam,
9151         .set_ringparam          = bnx2x_set_ringparam,
9152         .get_pauseparam         = bnx2x_get_pauseparam,
9153         .set_pauseparam         = bnx2x_set_pauseparam,
9154         .get_rx_csum            = bnx2x_get_rx_csum,
9155         .set_rx_csum            = bnx2x_set_rx_csum,
9156         .get_tx_csum            = ethtool_op_get_tx_csum,
9157         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9158         .set_flags              = bnx2x_set_flags,
9159         .get_flags              = ethtool_op_get_flags,
9160         .get_sg                 = ethtool_op_get_sg,
9161         .set_sg                 = ethtool_op_set_sg,
9162         .get_tso                = ethtool_op_get_tso,
9163         .set_tso                = bnx2x_set_tso,
9164         .self_test_count        = bnx2x_self_test_count,
9165         .self_test              = bnx2x_self_test,
9166         .get_strings            = bnx2x_get_strings,
9167         .phys_id                = bnx2x_phys_id,
9168         .get_stats_count        = bnx2x_get_stats_count,
9169         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9170 };
9171
9172 /* end of ethtool_ops */
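/* Illustrative user-space invocations (assuming the interface is
 * eth0; examples only, not part of the driver):
 *   ethtool -t eth0 offline   - full self test via bnx2x_self_test()
 *   ethtool -t eth0 online    - online subset only
 *   ethtool -C eth0 rx-usecs 100 tx-usecs 100
 *                             - coalescing via bnx2x_set_coalesce()
 */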
9173
9174 /****************************************************************************
9175 * General service functions
9176 ****************************************************************************/
9177
9178 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9179 {
9180         u16 pmcsr;
9181
9182         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9183
9184         switch (state) {
9185         case PCI_D0:
9186                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9187                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9188                                        PCI_PM_CTRL_PME_STATUS));
9189
9190                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9191                 /* delay required during transition out of D3hot */
9192                         msleep(20);
9193                 break;
9194
9195         case PCI_D3hot:
9196                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
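                /* 3 is D3hot in the PCI PM_CTRL power-state field */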
9197                 pmcsr |= 3;
9198
9199                 if (bp->wol)
9200                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9201
9202                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9203                                       pmcsr);
9204
9205         /* No more memory access after this point until
9206          * device is brought back to D0.
9207          */
9208                 break;
9209
9210         default:
9211                 return -EINVAL;
9212         }
9213         return 0;
9214 }
9215
9216 /*
9217  * net_device service functions
9218  */
9219
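/* NAPI poll: service Tx completions first, then Rx up to the budget.
 * On completion the status block is acked twice: a NOP ack for the
 * USTORM (Rx) index and an ENABLE ack for the CSTORM (Tx) index,
 * which re-enables the IGU interrupt for this fastpath.
 */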
9220 static int bnx2x_poll(struct napi_struct *napi, int budget)
9221 {
9222         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9223                                                  napi);
9224         struct bnx2x *bp = fp->bp;
9225         int work_done = 0;
9226
9227 #ifdef BNX2X_STOP_ON_ERROR
9228         if (unlikely(bp->panic))
9229                 goto poll_panic;
9230 #endif
9231
9232         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9233         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9234         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9235
9236         bnx2x_update_fpsb_idx(fp);
9237
9238         if (BNX2X_HAS_TX_WORK(fp))
9239                 bnx2x_tx_int(fp, budget);
9240
9241         if (BNX2X_HAS_RX_WORK(fp))
9242                 work_done = bnx2x_rx_int(fp, budget);
9243
9244         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9245
9246         /* must not complete if we consumed full budget */
9247         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9248
9249 #ifdef BNX2X_STOP_ON_ERROR
9250 poll_panic:
9251 #endif
9252                 netif_rx_complete(bp->dev, napi);
9253
9254                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9255                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9256                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9257                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9258         }
9259         return work_done;
9260 }
9261
9262
9263 /* we split the first BD into headers and data BDs
9264  * to ease the pain of our fellow microcode engineers;
9265  * we use one mapping for both BDs.
9266  * So far this has only been observed to happen
9267  * in Other Operating Systems(TM)
9268  */
9269 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9270                                    struct bnx2x_fastpath *fp,
9271                                    struct eth_tx_bd **tx_bd, u16 hlen,
9272                                    u16 bd_prod, int nbd)
9273 {
9274         struct eth_tx_bd *h_tx_bd = *tx_bd;
9275         struct eth_tx_bd *d_tx_bd;
9276         dma_addr_t mapping;
9277         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9278
9279         /* first fix first BD */
9280         h_tx_bd->nbd = cpu_to_le16(nbd);
9281         h_tx_bd->nbytes = cpu_to_le16(hlen);
9282
9283         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9284            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9285            h_tx_bd->addr_lo, h_tx_bd->nbd);
9286
9287         /* now get a new data BD
9288          * (after the pbd) and fill it */
9289         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9290         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9291
9292         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9293                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9294
9295         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9296         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9297         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9298         d_tx_bd->vlan = 0;
9299         /* this marks the BD as one that has no individual mapping
9300          * the FW ignores this flag in a BD not marked start
9301          */
9302         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9303         DP(NETIF_MSG_TX_QUEUED,
9304            "TSO split data size is %d (%x:%x)\n",
9305            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9306
9307         /* update tx_bd for marking the last BD flag */
9308         *tx_bd = d_tx_bd;
9309
9310         return bd_prod;
9311 }
9312
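/* Adjust a partial checksum when the transport header the HW parses
 * is 'fix' bytes away from where the stack started summing: fold the
 * intervening bytes out of (fix > 0) or into (fix < 0) the sum, then
 * byte-swap for the parsing BD.
 */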
9313 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9314 {
9315         if (fix > 0)
9316                 csum = (u16) ~csum_fold(csum_sub(csum,
9317                                 csum_partial(t_header - fix, fix, 0)));
9318
9319         else if (fix < 0)
9320                 csum = (u16) ~csum_fold(csum_add(csum,
9321                                 csum_partial(t_header, -fix, 0)));
9322
9323         return swab16(csum);
9324 }
9325
9326 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9327 {
9328         u32 rc;
9329
9330         if (skb->ip_summed != CHECKSUM_PARTIAL)
9331                 rc = XMIT_PLAIN;
9332
9333         else {
9334                 if (skb->protocol == htons(ETH_P_IPV6)) {
9335                         rc = XMIT_CSUM_V6;
9336                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9337                                 rc |= XMIT_CSUM_TCP;
9338
9339                 } else {
9340                         rc = XMIT_CSUM_V4;
9341                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9342                                 rc |= XMIT_CSUM_TCP;
9343                 }
9344         }
9345
9346         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9347                 rc |= XMIT_GSO_V4;
9348
9349         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9350                 rc |= XMIT_GSO_V6;
9351
9352         return rc;
9353 }
9354
9355 /* check if packet requires linearization (packet is too fragmented) */
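/* The FW can fetch only MAX_FETCH_BD BDs per LSO window, so every run
 * of (MAX_FETCH_BD - 3) data BDs must cover at least one full MSS;
 * otherwise the skb must be linearized first.
 */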
9356 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9357                              u32 xmit_type)
9358 {
9359         int to_copy = 0;
9360         int hlen = 0;
9361         int first_bd_sz = 0;
9362
9363         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9364         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9365
9366                 if (xmit_type & XMIT_GSO) {
9367                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9368                         /* Check if LSO packet needs to be copied:
9369                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9370                         int wnd_size = MAX_FETCH_BD - 3;
9371                         /* Number of windows to check */
9372                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9373                         int wnd_idx = 0;
9374                         int frag_idx = 0;
9375                         u32 wnd_sum = 0;
9376
9377                         /* Headers length */
9378                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9379                                 tcp_hdrlen(skb);
9380
9381                         /* Amount of data (w/o headers) on the linear part of the SKB */
9382                         first_bd_sz = skb_headlen(skb) - hlen;
9383
9384                         wnd_sum  = first_bd_sz;
9385
9386                         /* Calculate the first sum - it's special */
9387                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9388                                 wnd_sum +=
9389                                         skb_shinfo(skb)->frags[frag_idx].size;
9390
9391                         /* If there was data on linear skb data - check it */
9392                         if (first_bd_sz > 0) {
9393                                 if (unlikely(wnd_sum < lso_mss)) {
9394                                         to_copy = 1;
9395                                         goto exit_lbl;
9396                                 }
9397
9398                                 wnd_sum -= first_bd_sz;
9399                         }
9400
9401                         /* Others are easier: run through the frag list and
9402                            check all windows */
9403                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9404                                 wnd_sum +=
9405                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9406
9407                                 if (unlikely(wnd_sum < lso_mss)) {
9408                                         to_copy = 1;
9409                                         break;
9410                                 }
9411                                 wnd_sum -=
9412                                         skb_shinfo(skb)->frags[wnd_idx].size;
9413                         }
9414
9415                 } else {
9416                         /* a non-LSO packet that is too fragmented
9417                            must always be linearized */
9418                         to_copy = 1;
9419                 }
9420         }
9421
9422 exit_lbl:
9423         if (unlikely(to_copy))
9424                 DP(NETIF_MSG_TX_QUEUED,
9425                    "Linearization IS REQUIRED for %s packet. "
9426                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9427                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9428                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9429
9430         return to_copy;
9431 }
9432
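/* The sliding-window test in bnx2x_pkt_req_lin() can be read in isolation:
 * the FW can fetch one MSS worth of payload from at most wnd_size BDs, so
 * every window of wnd_size consecutive buffers must sum to at least the
 * MSS.  A minimal standalone sketch over a plain array of fragment sizes
 * (names and parameters are illustrative):
 */
#if 0	/* illustrative sketch only - not compiled */
static int sketch_needs_linearization(const int *frag_sz, int nr_frags,
				      int wnd_size, int mss)
{
	int sum = 0, i;

	if (nr_frags < wnd_size)
		return 0;		/* few enough buffers, nothing to check */

	for (i = 0; i < wnd_size; i++)	/* sum of the first window */
		sum += frag_sz[i];

	for (i = 0; ; i++) {
		if (sum < mss)		/* window cannot hold one MSS */
			return 1;
		if (i + wnd_size >= nr_frags)
			break;
		sum += frag_sz[i + wnd_size];	/* slide the window right */
		sum -= frag_sz[i];
	}
	return 0;
}
#endif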
9433 /* called with netif_tx_lock
9434  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9435  * netif_wake_queue()
9436  */
9437 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9438 {
9439         struct bnx2x *bp = netdev_priv(dev);
9440         struct bnx2x_fastpath *fp;
9441         struct sw_tx_bd *tx_buf;
9442         struct eth_tx_bd *tx_bd;
9443         struct eth_tx_parse_bd *pbd = NULL;
9444         u16 pkt_prod, bd_prod;
9445         int nbd, fp_index;
9446         dma_addr_t mapping;
9447         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9448         int vlan_off = (bp->e1hov ? 4 : 0);
9449         int i;
9450         u8 hlen = 0;
9451
9452 #ifdef BNX2X_STOP_ON_ERROR
9453         if (unlikely(bp->panic))
9454                 return NETDEV_TX_BUSY;
9455 #endif
9456
9457         fp_index = (smp_processor_id() % bp->num_queues);
9458         fp = &bp->fp[fp_index];
9459
9460         if (unlikely(bnx2x_tx_avail(fp) <
9461                                         (skb_shinfo(skb)->nr_frags + 3))) {
9462                 bp->eth_stats.driver_xoff++;
9463                 netif_stop_queue(dev);
9464                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9465                 return NETDEV_TX_BUSY;
9466         }
9467
9468         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9469            "  gso type %x  xmit_type %x\n",
9470            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9471            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9472
9473         /* First, check if we need to linearize the skb
9474            (due to FW restrictions) */
9475         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9476                 /* Statistics of linearization */
9477                 bp->lin_cnt++;
9478                 if (skb_linearize(skb) != 0) {
9479                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9480                            "silently dropping this SKB\n");
9481                         dev_kfree_skb_any(skb);
9482                         return NETDEV_TX_OK;
9483                 }
9484         }
9485
9486         /*
9487          * Please read carefully. First we use one BD which we mark as start,
9488          * then for TSO or checksum offload we have a parsing info BD,
9489          * and only then we have the rest of the TSO BDs.
9490          * (don't forget to mark the last one as last,
9491          * and to unmap only AFTER you write to the BD ...)
9492          * And above all, all PBD sizes are in words - NOT DWORDS!
9493          */
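        /* Roughly, the chain built below looks like this for a TSO packet:
         *
         *   START BD (headers) -> PARSE BD -> data BD -> ... -> END BD
         *
         * The parse BD exists only when xmit_type is non-zero, i.e. when the
         * HW must parse headers for checksum offload or LSO.
         */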
9494
9495         pkt_prod = fp->tx_pkt_prod++;
9496         bd_prod = TX_BD(fp->tx_bd_prod);
9497
9498         /* get a tx_buf and first BD */
9499         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9500         tx_bd = &fp->tx_desc_ring[bd_prod];
9501
9502         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9503         tx_bd->general_data = (UNICAST_ADDRESS <<
9504                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9505         tx_bd->general_data |= 1; /* header nbd */
9506
9507         /* remember the first BD of the packet */
9508         tx_buf->first_bd = fp->tx_bd_prod;
9509         tx_buf->skb = skb;
9510
9511         DP(NETIF_MSG_TX_QUEUED,
9512            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9513            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9514
9515         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9516                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9517                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9518                 vlan_off += 4;
9519         } else
9520                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9521
9522         if (xmit_type) {
9523
9524                 /* turn on parsing and get a BD */
9525                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9526                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9527
9528                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9529         }
9530
9531         if (xmit_type & XMIT_CSUM) {
9532                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9533
9534                 /* for now NS flag is not used in Linux */
9535                 pbd->global_data = (hlen |
9536                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9537                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9538
9539                 pbd->ip_hlen = (skb_transport_header(skb) -
9540                                 skb_network_header(skb)) / 2;
9541
9542                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9543
9544                 pbd->total_hlen = cpu_to_le16(hlen);
9545                 hlen = hlen*2 - vlan_off;
9546
9547                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9548
9549                 if (xmit_type & XMIT_CSUM_V4)
9550                         tx_bd->bd_flags.as_bitfield |=
9551                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9552                 else
9553                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9554
9555                 if (xmit_type & XMIT_CSUM_TCP) {
9556                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9557
9558                 } else {
9559                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9560
9561                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9562                         pbd->cs_offset = fix / 2;
9563
9564                         DP(NETIF_MSG_TX_QUEUED,
9565                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9566                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9567                            SKB_CS(skb));
9568
9569                         /* HW bug: fixup the CSUM */
9570                         pbd->tcp_pseudo_csum =
9571                                 bnx2x_csum_fix(skb_transport_header(skb),
9572                                                SKB_CS(skb), fix);
9573
9574                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9575                            pbd->tcp_pseudo_csum);
9576                 }
9577         }
9578
9579         mapping = pci_map_single(bp->pdev, skb->data,
9580                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9581
9582         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9583         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9584         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9585         tx_bd->nbd = cpu_to_le16(nbd);
9586         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9587
9588         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9589            "  nbytes %d  flags %x  vlan %x\n",
9590            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9591            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9592            le16_to_cpu(tx_bd->vlan));
9593
9594         if (xmit_type & XMIT_GSO) {
9595
9596                 DP(NETIF_MSG_TX_QUEUED,
9597                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9598                    skb->len, hlen, skb_headlen(skb),
9599                    skb_shinfo(skb)->gso_size);
9600
9601                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9602
9603                 if (unlikely(skb_headlen(skb) > hlen))
9604                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9605                                                  bd_prod, ++nbd);
9606
9607                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9608                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9609                 pbd->tcp_flags = pbd_tcp_flags(skb);
9610
9611                 if (xmit_type & XMIT_GSO_V4) {
9612                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9613                         pbd->tcp_pseudo_csum =
9614                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9615                                                           ip_hdr(skb)->daddr,
9616                                                           0, IPPROTO_TCP, 0));
9617
9618                 } else
9619                         pbd->tcp_pseudo_csum =
9620                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9621                                                         &ipv6_hdr(skb)->daddr,
9622                                                         0, IPPROTO_TCP, 0));
9623
9624                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9625         }
9626
9627         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9628                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9629
9630                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9631                 tx_bd = &fp->tx_desc_ring[bd_prod];
9632
9633                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9634                                        frag->size, PCI_DMA_TODEVICE);
9635
9636                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9637                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9638                 tx_bd->nbytes = cpu_to_le16(frag->size);
9639                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9640                 tx_bd->bd_flags.as_bitfield = 0;
9641
9642                 DP(NETIF_MSG_TX_QUEUED,
9643                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9644                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9645                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9646         }
9647
9648         /* now at last mark the BD as the last BD */
9649         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9650
9651         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9652            tx_bd, tx_bd->bd_flags.as_bitfield);
9653
9654         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9655
9656         /* now send a tx doorbell, counting the next BD
9657          * if the packet contains or ends with it
9658          */
9659         if (TX_BD_POFF(bd_prod) < nbd)
9660                 nbd++;
9661
9662         if (pbd)
9663                 DP(NETIF_MSG_TX_QUEUED,
9664                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9665                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9666                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9667                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9668                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9669
9670         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9671
9672         fp->hw_tx_prods->bds_prod =
9673                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9674         mb(); /* FW restriction: must not reorder writing nbd and packets */
9675         fp->hw_tx_prods->packets_prod =
9676                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9677         DOORBELL(bp, FP_IDX(fp), 0);
9678
9679         mmiowb();
9680
9681         fp->tx_bd_prod += nbd;
9682         dev->trans_start = jiffies;
9683
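        /* If the ring may not hold another maximally fragmented packet, stop
         * the queue and then re-check: bnx2x_tx_int() may have freed
         * descriptors after the first check, and the re-check after
         * netif_stop_queue() keeps the queue from staying stopped forever on
         * that race.
         */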
9684         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9685                 netif_stop_queue(dev);
9686                 bp->eth_stats.driver_xoff++;
9687                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9688                         netif_wake_queue(dev);
9689         }
9690         fp->tx_pkt++;
9691
9692         return NETDEV_TX_OK;
9693 }
9694
9695 /* called with rtnl_lock */
9696 static int bnx2x_open(struct net_device *dev)
9697 {
9698         struct bnx2x *bp = netdev_priv(dev);
9699
9700         bnx2x_set_power_state(bp, PCI_D0);
9701
9702         return bnx2x_nic_load(bp, LOAD_OPEN);
9703 }
9704
9705 /* called with rtnl_lock */
9706 static int bnx2x_close(struct net_device *dev)
9707 {
9708         struct bnx2x *bp = netdev_priv(dev);
9709
9710         /* Unload the driver, release IRQs */
9711         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9712         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9713                 if (!CHIP_REV_IS_SLOW(bp))
9714                         bnx2x_set_power_state(bp, PCI_D3hot);
9715
9716         return 0;
9717 }
9718
9719 /* called with netif_tx_lock from set_multicast */
9720 static void bnx2x_set_rx_mode(struct net_device *dev)
9721 {
9722         struct bnx2x *bp = netdev_priv(dev);
9723         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9724         int port = BP_PORT(bp);
9725
9726         if (bp->state != BNX2X_STATE_OPEN) {
9727                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9728                 return;
9729         }
9730
9731         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9732
9733         if (dev->flags & IFF_PROMISC)
9734                 rx_mode = BNX2X_RX_MODE_PROMISC;
9735
9736         else if ((dev->flags & IFF_ALLMULTI) ||
9737                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9738                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9739
9740         else { /* some multicasts */
9741                 if (CHIP_IS_E1(bp)) {
9742                         int i, old, offset;
9743                         struct dev_mc_list *mclist;
9744                         struct mac_configuration_cmd *config =
9745                                                 bnx2x_sp(bp, mcast_config);
9746
9747                         for (i = 0, mclist = dev->mc_list;
9748                              mclist && (i < dev->mc_count);
9749                              i++, mclist = mclist->next) {
9750
9751                                 config->config_table[i].
9752                                         cam_entry.msb_mac_addr =
9753                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9754                                 config->config_table[i].
9755                                         cam_entry.middle_mac_addr =
9756                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9757                                 config->config_table[i].
9758                                         cam_entry.lsb_mac_addr =
9759                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9760                                 config->config_table[i].cam_entry.flags =
9761                                                         cpu_to_le16(port);
9762                                 config->config_table[i].
9763                                         target_table_entry.flags = 0;
9764                                 config->config_table[i].
9765                                         target_table_entry.client_id = 0;
9766                                 config->config_table[i].
9767                                         target_table_entry.vlan_id = 0;
9768
9769                                 DP(NETIF_MSG_IFUP,
9770                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9771                                    config->config_table[i].
9772                                                 cam_entry.msb_mac_addr,
9773                                    config->config_table[i].
9774                                                 cam_entry.middle_mac_addr,
9775                                    config->config_table[i].
9776                                                 cam_entry.lsb_mac_addr);
9777                         }
9778                         old = config->hdr.length_6b;
9779                         if (old > i) {
9780                                 for (; i < old; i++) {
9781                                         if (CAM_IS_INVALID(config->
9782                                                            config_table[i])) {
9783                                                 i--; /* already invalidated */
9784                                                 break;
9785                                         }
9786                                         /* invalidate */
9787                                         CAM_INVALIDATE(config->
9788                                                        config_table[i]);
9789                                 }
9790                         }
9791
9792                         if (CHIP_REV_IS_SLOW(bp))
9793                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9794                         else
9795                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9796
9797                         config->hdr.length_6b = i;
9798                         config->hdr.offset = offset;
9799                         config->hdr.client_id = BP_CL_ID(bp);
9800                         config->hdr.reserved1 = 0;
9801
9802                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9803                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9804                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9805                                       0);
9806                 } else { /* E1H */
9807                         /* Accept one or more multicasts */
9808                         struct dev_mc_list *mclist;
9809                         u32 mc_filter[MC_HASH_SIZE];
9810                         u32 crc, bit, regidx;
9811                         int i;
9812
9813                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9814
9815                         for (i = 0, mclist = dev->mc_list;
9816                              mclist && (i < dev->mc_count);
9817                              i++, mclist = mclist->next) {
9818
9819                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9820                                    "%02x:%02x:%02x:%02x:%02x:%02x\n",
9821                                    mclist->dmi_addr[0], mclist->dmi_addr[1],
9822                                    mclist->dmi_addr[2], mclist->dmi_addr[3],
9823                                    mclist->dmi_addr[4], mclist->dmi_addr[5]);
9824
9825                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9826                                 bit = (crc >> 24) & 0xff;
9827                                 regidx = bit >> 5;
9828                                 bit &= 0x1f;
9829                                 mc_filter[regidx] |= (1 << bit);
9830                         }
9831
9832                         for (i = 0; i < MC_HASH_SIZE; i++)
9833                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9834                                        mc_filter[i]);
9835                 }
9836         }
9837
9838         bp->rx_mode = rx_mode;
9839         bnx2x_set_storm_rx_mode(bp);
9840 }
9841
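/* In the E1H branch above, each multicast MAC is hashed with CRC32c and one
 * bit is set in an 8 x 32-bit approximate filter.  A standalone sketch of
 * the same bit selection (helper name is illustrative):
 */
#if 0	/* illustrative sketch only - not compiled */
static void sketch_mc_hash_set(const u8 *mac, u32 *mc_filter)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* top CRC byte selects 1 of 256 bits */

	/* bits 5+ pick the 32-bit word, low 5 bits pick the bit within it */
	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
#endif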
9842 /* called with rtnl_lock */
9843 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9844 {
9845         struct sockaddr *addr = p;
9846         struct bnx2x *bp = netdev_priv(dev);
9847
9848         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9849                 return -EINVAL;
9850
9851         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9852         if (netif_running(dev)) {
9853                 if (CHIP_IS_E1(bp))
9854                         bnx2x_set_mac_addr_e1(bp, 1);
9855                 else
9856                         bnx2x_set_mac_addr_e1h(bp, 1);
9857         }
9858
9859         return 0;
9860 }
9861
9862 /* called with rtnl_lock */
9863 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9864 {
9865         struct mii_ioctl_data *data = if_mii(ifr);
9866         struct bnx2x *bp = netdev_priv(dev);
9867         int err;
9868
9869         switch (cmd) {
9870         case SIOCGMIIPHY:
9871                 data->phy_id = bp->port.phy_addr;
9872
9873                 /* fallthrough */
9874
9875         case SIOCGMIIREG: {
9876                 u16 mii_regval;
9877
9878                 if (!netif_running(dev))
9879                         return -EAGAIN;
9880
9881                 mutex_lock(&bp->port.phy_mutex);
9882                 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9883                                       DEFAULT_PHY_DEV_ADDR,
9884                                       (data->reg_num & 0x1f), &mii_regval);
9885                 data->val_out = mii_regval;
9886                 mutex_unlock(&bp->port.phy_mutex);
9887                 return err;
9888         }
9889
9890         case SIOCSMIIREG:
9891                 if (!capable(CAP_NET_ADMIN))
9892                         return -EPERM;
9893
9894                 if (!netif_running(dev))
9895                         return -EAGAIN;
9896
9897                 mutex_lock(&bp->port.phy_mutex);
9898                 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9899                                        DEFAULT_PHY_DEV_ADDR,
9900                                        (data->reg_num & 0x1f), data->val_in);
9901                 mutex_unlock(&bp->port.phy_mutex);
9902                 return err;
9903
9904         default:
9905                 /* do nothing */
9906                 break;
9907         }
9908
9909         return -EOPNOTSUPP;
9910 }
9911
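/* The MII ioctls above are reachable from userspace through the generic
 * SIOCGMIIPHY/SIOCGMIIREG interface.  A hedged userspace sketch (interface
 * name and register number are made-up illustrations, not kernel code):
 */
#if 0	/* illustrative userspace sketch - not compiled */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/mii.h>

static int sketch_read_phy_reg(int sock, const char *ifname, int reg)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;
	mii->reg_num = reg & 0x1f;	/* the driver masks to 5 bits too */
	if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
		return -1;

	return mii->val_out;
}
#endif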
9912 /* called with rtnl_lock */
9913 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9914 {
9915         struct bnx2x *bp = netdev_priv(dev);
9916         int rc = 0;
9917
9918         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9919             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9920                 return -EINVAL;
9921
9922         /* This does not race with packet allocation
9923          * because the actual alloc size is
9924          * only updated as part of load
9925          */
9926         dev->mtu = new_mtu;
9927
9928         if (netif_running(dev)) {
9929                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9930                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9931         }
9932
9933         return rc;
9934 }
9935
9936 static void bnx2x_tx_timeout(struct net_device *dev)
9937 {
9938         struct bnx2x *bp = netdev_priv(dev);
9939
9940 #ifdef BNX2X_STOP_ON_ERROR
9941         if (!bp->panic)
9942                 bnx2x_panic();
9943 #endif
9944         /* This allows the netif to be shutdown gracefully before resetting */
9945         schedule_work(&bp->reset_task);
9946 }
9947
9948 #ifdef BCM_VLAN
9949 /* called with rtnl_lock */
9950 static void bnx2x_vlan_rx_register(struct net_device *dev,
9951                                    struct vlan_group *vlgrp)
9952 {
9953         struct bnx2x *bp = netdev_priv(dev);
9954
9955         bp->vlgrp = vlgrp;
9956         if (netif_running(dev))
9957                 bnx2x_set_client_config(bp);
9958 }
9959
9960 #endif
9961
9962 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9963 static void poll_bnx2x(struct net_device *dev)
9964 {
9965         struct bnx2x *bp = netdev_priv(dev);
9966
9967         disable_irq(bp->pdev->irq);
9968         bnx2x_interrupt(bp->pdev->irq, dev);
9969         enable_irq(bp->pdev->irq);
9970 }
9971 #endif
9972
9973 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9974                                     struct net_device *dev)
9975 {
9976         struct bnx2x *bp;
9977         int rc;
9978
9979         SET_NETDEV_DEV(dev, &pdev->dev);
9980         bp = netdev_priv(dev);
9981
9982         bp->dev = dev;
9983         bp->pdev = pdev;
9984         bp->flags = 0;
9985         bp->func = PCI_FUNC(pdev->devfn);
9986
9987         rc = pci_enable_device(pdev);
9988         if (rc) {
9989                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9990                 goto err_out;
9991         }
9992
9993         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9994                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9995                        " aborting\n");
9996                 rc = -ENODEV;
9997                 goto err_out_disable;
9998         }
9999
10000         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10001                 printk(KERN_ERR PFX "Cannot find second PCI device"
10002                        " base address, aborting\n");
10003                 rc = -ENODEV;
10004                 goto err_out_disable;
10005         }
10006
10007         if (atomic_read(&pdev->enable_cnt) == 1) {
10008                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10009                 if (rc) {
10010                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10011                                " aborting\n");
10012                         goto err_out_disable;
10013                 }
10014
10015                 pci_set_master(pdev);
10016                 pci_save_state(pdev);
10017         }
10018
10019         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10020         if (bp->pm_cap == 0) {
10021                 printk(KERN_ERR PFX "Cannot find power management"
10022                        " capability, aborting\n");
10023                 rc = -EIO;
10024                 goto err_out_release;
10025         }
10026
10027         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10028         if (bp->pcie_cap == 0) {
10029                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10030                        " aborting\n");
10031                 rc = -EIO;
10032                 goto err_out_release;
10033         }
10034
10035         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10036                 bp->flags |= USING_DAC_FLAG;
10037                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10038                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10039                                " failed, aborting\n");
10040                         rc = -EIO;
10041                         goto err_out_release;
10042                 }
10043
10044         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10045                 printk(KERN_ERR PFX "System does not support DMA,"
10046                        " aborting\n");
10047                 rc = -EIO;
10048                 goto err_out_release;
10049         }
10050
10051         dev->mem_start = pci_resource_start(pdev, 0);
10052         dev->base_addr = dev->mem_start;
10053         dev->mem_end = pci_resource_end(pdev, 0);
10054
10055         dev->irq = pdev->irq;
10056
10057         bp->regview = ioremap_nocache(dev->base_addr,
10058                                       pci_resource_len(pdev, 0));
10059         if (!bp->regview) {
10060                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10061                 rc = -ENOMEM;
10062                 goto err_out_release;
10063         }
10064
10065         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10066                                         min_t(u64, BNX2X_DB_SIZE,
10067                                               pci_resource_len(pdev, 2)));
10068         if (!bp->doorbells) {
10069                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10070                 rc = -ENOMEM;
10071                 goto err_out_unmap;
10072         }
10073
10074         bnx2x_set_power_state(bp, PCI_D0);
10075
10076         /* clean indirect addresses */
10077         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10078                                PCICFG_VENDOR_ID_OFFSET);
10079         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10080         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10081         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10082         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10083
10084         dev->hard_start_xmit = bnx2x_start_xmit;
10085         dev->watchdog_timeo = TX_TIMEOUT;
10086
10087         dev->ethtool_ops = &bnx2x_ethtool_ops;
10088         dev->open = bnx2x_open;
10089         dev->stop = bnx2x_close;
10090         dev->set_multicast_list = bnx2x_set_rx_mode;
10091         dev->set_mac_address = bnx2x_change_mac_addr;
10092         dev->do_ioctl = bnx2x_ioctl;
10093         dev->change_mtu = bnx2x_change_mtu;
10094         dev->tx_timeout = bnx2x_tx_timeout;
10095 #ifdef BCM_VLAN
10096         dev->vlan_rx_register = bnx2x_vlan_rx_register;
10097 #endif
10098 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10099         dev->poll_controller = poll_bnx2x;
10100 #endif
10101         dev->features |= NETIF_F_SG;
10102         dev->features |= NETIF_F_HW_CSUM;
10103         if (bp->flags & USING_DAC_FLAG)
10104                 dev->features |= NETIF_F_HIGHDMA;
10105 #ifdef BCM_VLAN
10106         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10107 #endif
10108         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10109         dev->features |= NETIF_F_TSO6;
10110
10111         return 0;
10112
10113 err_out_unmap:
10114         if (bp->regview) {
10115                 iounmap(bp->regview);
10116                 bp->regview = NULL;
10117         }
10118         if (bp->doorbells) {
10119                 iounmap(bp->doorbells);
10120                 bp->doorbells = NULL;
10121         }
10122
10123 err_out_release:
10124         if (atomic_read(&pdev->enable_cnt) == 1)
10125                 pci_release_regions(pdev);
10126
10127 err_out_disable:
10128         pci_disable_device(pdev);
10129         pci_set_drvdata(pdev, NULL);
10130
10131 err_out:
10132         return rc;
10133 }
10134
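/* bnx2x_init_dev() above follows the usual probe-time DMA setup: prefer a
 * 64-bit mask (recording it in USING_DAC_FLAG so NETIF_F_HIGHDMA can be set
 * later), keep the coherent mask in step, and fall back to 32-bit.  A
 * condensed sketch of just that decision (helper name is illustrative):
 */
#if 0	/* illustrative sketch only - not compiled */
static int sketch_set_dma_masks(struct pci_dev *pdev, u32 *flags)
{
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		*flags |= USING_DAC_FLAG;	/* 64-bit addressing works */
		/* coherent allocations must honour the same width */
		return pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}
	/* no 64-bit support: 32-bit or fail the probe */
	return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
}
#endif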
10135 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10136 {
10137         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10138
10139         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10140         return val;
10141 }
10142
10143 /* return value of 1=2.5GHz 2=5GHz */
10144 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10145 {
10146         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10147
10148         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10149         return val;
10150 }
10151
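/* The two helpers above report the negotiated lane count and generation
 * (1 = 2.5GT/s, 2 = 5GT/s).  A sketch combining them into a raw link rate
 * in tenths of GT/s per direction (helper name is illustrative; 8b/10b
 * encoding overhead is ignored):
 */
#if 0	/* illustrative sketch only - not compiled */
static int sketch_pcie_rate_tenth_gts(struct bnx2x *bp)
{
	int width = bnx2x_get_pcie_width(bp);	/* number of lanes */
	int gen = bnx2x_get_pcie_speed(bp);	/* 1 or 2 */

	return width * (gen == 2 ? 50 : 25);	/* e.g. x8 Gen2 -> 400 */
}
#endif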
10152 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10153                                     const struct pci_device_id *ent)
10154 {
10155         static int version_printed;
10156         struct net_device *dev = NULL;
10157         struct bnx2x *bp;
10158         int rc;
10159         DECLARE_MAC_BUF(mac);
10160
10161         if (version_printed++ == 0)
10162                 printk(KERN_INFO "%s", version);
10163
10164         /* dev zeroed in alloc_etherdev */
10165         dev = alloc_etherdev(sizeof(*bp));
10166         if (!dev) {
10167                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10168                 return -ENOMEM;
10169         }
10170
10171         netif_carrier_off(dev);
10172
10173         bp = netdev_priv(dev);
10174         bp->msglevel = debug;
10175
10176         rc = bnx2x_init_dev(pdev, dev);
10177         if (rc < 0) {
10178                 free_netdev(dev);
10179                 return rc;
10180         }
10181
10182         rc = register_netdev(dev);
10183         if (rc) {
10184                 dev_err(&pdev->dev, "Cannot register net device\n");
10185                 goto init_one_exit;
10186         }
10187
10188         pci_set_drvdata(pdev, dev);
10189
10190         rc = bnx2x_init_bp(bp);
10191         if (rc) {
10192                 unregister_netdev(dev);
10193                 goto init_one_exit;
10194         }
10195
10196         bp->common.name = board_info[ent->driver_data].name;
10197         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10198                " IRQ %d, ", dev->name, bp->common.name,
10199                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10200                bnx2x_get_pcie_width(bp),
10201                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10202                dev->base_addr, bp->pdev->irq);
10203         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10204         return 0;
10205
10206 init_one_exit:
10207         if (bp->regview)
10208                 iounmap(bp->regview);
10209
10210         if (bp->doorbells)
10211                 iounmap(bp->doorbells);
10212
10213         free_netdev(dev);
10214
10215         if (atomic_read(&pdev->enable_cnt) == 1)
10216                 pci_release_regions(pdev);
10217
10218         pci_disable_device(pdev);
10219         pci_set_drvdata(pdev, NULL);
10220
10221         return rc;
10222 }
10223
10224 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10225 {
10226         struct net_device *dev = pci_get_drvdata(pdev);
10227         struct bnx2x *bp;
10228
10229         if (!dev) {
10230                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10231                 return;
10232         }
10233         bp = netdev_priv(dev);
10234
10235         unregister_netdev(dev);
10236
10237         if (bp->regview)
10238                 iounmap(bp->regview);
10239
10240         if (bp->doorbells)
10241                 iounmap(bp->doorbells);
10242
10243         free_netdev(dev);
10244
10245         if (atomic_read(&pdev->enable_cnt) == 1)
10246                 pci_release_regions(pdev);
10247
10248         pci_disable_device(pdev);
10249         pci_set_drvdata(pdev, NULL);
10250 }
10251
10252 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10253 {
10254         struct net_device *dev = pci_get_drvdata(pdev);
10255         struct bnx2x *bp;
10256
10257         if (!dev) {
10258                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10259                 return -ENODEV;
10260         }
10261         bp = netdev_priv(dev);
10262
10263         rtnl_lock();
10264
10265         pci_save_state(pdev);
10266
10267         if (!netif_running(dev)) {
10268                 rtnl_unlock();
10269                 return 0;
10270         }
10271
10272         netif_device_detach(dev);
10273
10274         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10275
10276         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10277
10278         rtnl_unlock();
10279
10280         return 0;
10281 }
10282
10283 static int bnx2x_resume(struct pci_dev *pdev)
10284 {
10285         struct net_device *dev = pci_get_drvdata(pdev);
10286         struct bnx2x *bp;
10287         int rc;
10288
10289         if (!dev) {
10290                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10291                 return -ENODEV;
10292         }
10293         bp = netdev_priv(dev);
10294
10295         rtnl_lock();
10296
10297         pci_restore_state(pdev);
10298
10299         if (!netif_running(dev)) {
10300                 rtnl_unlock();
10301                 return 0;
10302         }
10303
10304         bnx2x_set_power_state(bp, PCI_D0);
10305         netif_device_attach(dev);
10306
10307         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10308
10309         rtnl_unlock();
10310
10311         return rc;
10312 }
10313
10314 /**
10315  * bnx2x_io_error_detected - called when PCI error is detected
10316  * @pdev: Pointer to PCI device
10317  * @state: The current pci connection state
10318  *
10319  * This function is called after a PCI bus error affecting
10320  * this device has been detected.
10321  */
10322 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10323                                                 pci_channel_state_t state)
10324 {
10325         struct net_device *dev = pci_get_drvdata(pdev);
10326         struct bnx2x *bp = netdev_priv(dev);
10327
10328         rtnl_lock();
10329
10330         netif_device_detach(dev);
10331
10332         if (netif_running(dev))
10333                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10334
10335         pci_disable_device(pdev);
10336
10337         rtnl_unlock();
10338
10339         /* Request a slot reset */
10340         return PCI_ERS_RESULT_NEED_RESET;
10341 }
10342
10343 /**
10344  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10345  * @pdev: Pointer to PCI device
10346  *
10347  * Restart the card from scratch, as if from a cold-boot.
10348  */
10349 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10350 {
10351         struct net_device *dev = pci_get_drvdata(pdev);
10352         struct bnx2x *bp = netdev_priv(dev);
10353
10354         rtnl_lock();
10355
10356         if (pci_enable_device(pdev)) {
10357                 dev_err(&pdev->dev,
10358                         "Cannot re-enable PCI device after reset\n");
10359                 rtnl_unlock();
10360                 return PCI_ERS_RESULT_DISCONNECT;
10361         }
10362
10363         pci_set_master(pdev);
10364         pci_restore_state(pdev);
10365
10366         if (netif_running(dev))
10367                 bnx2x_set_power_state(bp, PCI_D0);
10368
10369         rtnl_unlock();
10370
10371         return PCI_ERS_RESULT_RECOVERED;
10372 }
10373
10374 /**
10375  * bnx2x_io_resume - called when traffic can start flowing again
10376  * @pdev: Pointer to PCI device
10377  *
10378  * This callback is called when the error recovery driver tells us that
10379  * it's OK to resume normal operation.
10380  */
10381 static void bnx2x_io_resume(struct pci_dev *pdev)
10382 {
10383         struct net_device *dev = pci_get_drvdata(pdev);
10384         struct bnx2x *bp = netdev_priv(dev);
10385
10386         rtnl_lock();
10387
10388         if (netif_running(dev))
10389                 bnx2x_nic_load(bp, LOAD_OPEN);
10390
10391         netif_device_attach(dev);
10392
10393         rtnl_unlock();
10394 }
10395
10396 static struct pci_error_handlers bnx2x_err_handler = {
10397         .error_detected = bnx2x_io_error_detected,
10398         .slot_reset = bnx2x_io_slot_reset,
10399         .resume = bnx2x_io_resume,
10400 };
10401
10402 static struct pci_driver bnx2x_pci_driver = {
10403         .name        = DRV_MODULE_NAME,
10404         .id_table    = bnx2x_pci_tbl,
10405         .probe       = bnx2x_init_one,
10406         .remove      = __devexit_p(bnx2x_remove_one),
10407         .suspend     = bnx2x_suspend,
10408         .resume      = bnx2x_resume,
10409         .err_handler = &bnx2x_err_handler,
10410 };
10411
10412 static int __init bnx2x_init(void)
10413 {
10414         return pci_register_driver(&bnx2x_pci_driver);
10415 }
10416
10417 static void __exit bnx2x_cleanup(void)
10418 {
10419         pci_unregister_driver(&bnx2x_pci_driver);
10420 }
10421
10422 module_init(bnx2x_init);
10423 module_exit(bnx2x_cleanup);
10424