/* Source listing: drivers/net/bnx2.c from linux-2.6-omap-h63xx.git,
 * at commit "[BNX2]: Support multiple MSIX IRQs."
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.0"
60 #define DRV_MODULE_RELDATE      "December 11, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts
 * instead of MSI/MSI-X.
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board types; used as the driver_data index into board_info[] below,
 * set per device in bnx2_pci_tbl[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
/* indexed by board_t, above */
static const struct {
	char *name;	/* human-readable adapter name */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* PCI IDs handled by this driver.  The HP entries with specific
 * subsystem IDs are listed before the PCI_ANY_ID catch-alls for the
 * same device ID so that the more specific match wins.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* NVRAM device table.  Each entry starts with five chip-specific
 * configuration words (field names are in struct flash_spec in
 * bnx2.h — TODO confirm exact meanings there), followed by the access
 * flags, page geometry, byte-address mask, total size and a
 * human-readable name.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
/* The 5709 uses one fixed buffered-flash layout rather than a
 * strapping-selected entry from flash_table[].
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
/* Return the number of free TX descriptors.
 *
 * tx_prod is advanced by the transmit path and tx_cons by the
 * completion path; the smp_mb() orders our reads of the two indices
 * against the updates made on other CPUs.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bnapi->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		/* The indices are 16-bit values; mask off the borrow
		 * from the u32 subtraction above.
		 */
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
246
/* Indirectly read a device register through the PCI config window.
 *
 * The address write and data read must not be interleaved with another
 * indirect access, so both are done under indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
258
/* Indirectly write a device register through the PCI config window.
 *
 * The address/data register pair is shared, so the two writes are kept
 * atomic with respect to other indirect accesses via indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read a PHY register over the EMAC MDIO interface.
 *
 * @reg: PHY register number
 * @val: out parameter; receives the register value, or 0 on timeout
 *
 * Returns 0 on success, -EBUSY if the transaction does not complete
 * within 50 x 10us polls.  If the chip is auto-polling the PHY, the
 * auto-poll is disabled around the manual access so the two cannot
 * collide on the MDIO bus, then re-enabled.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	/* Temporarily turn off hardware auto-polling of the PHY. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Kick off the read transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: START_BUSY clears when done. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore hardware auto-polling if it was enabled. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
349
/* Write a PHY register over the EMAC MDIO interface.
 *
 * @reg: PHY register number
 * @val: 16-bit value to write (lower bits of the COMM word)
 *
 * Returns 0 on success, -EBUSY if the transaction does not complete
 * within 50 x 10us polls.  As in bnx2_read_phy(), hardware auto-poll
 * is suspended around the manual MDIO transaction.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	/* Temporarily turn off hardware auto-polling of the PHY. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Kick off the write transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: START_BUSY clears when done. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore hardware auto-polling if it was enabled. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
398
/* Mask interrupts on every vector, then flush the writes with a read
 * so the masking has taken effect on the device before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);	/* posted-write flush */
}
412
/* Unmask interrupts on every vector.
 *
 * Each vector gets two INT_ACK_CMD writes: the first acks up to
 * last_status_idx with MASK_INT still set, the second clears the mask.
 * The final COAL_NOW command forces the host coalescing block to run;
 * presumably so events that arrived while masked generate an interrupt
 * immediately — TODO confirm against the HC documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
433
/* Mask all interrupt vectors and wait for any in-flight handlers to
 * finish.  intr_sem is incremented before masking; bnx2_netif_start()
 * performs the matching decrement and only re-enables when it reaches
 * zero.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
444
445 static void
446 bnx2_napi_disable(struct bnx2 *bp)
447 {
448         int i;
449
450         for (i = 0; i < bp->irq_nvecs; i++)
451                 napi_disable(&bp->bnx2_napi[i].napi);
452 }
453
454 static void
455 bnx2_napi_enable(struct bnx2 *bp)
456 {
457         int i;
458
459         for (i = 0; i < bp->irq_nvecs; i++)
460                 napi_enable(&bp->bnx2_napi[i].napi);
461 }
462
/* Quiesce the interface: synchronously mask interrupts first, then
 * stop NAPI polling and the TX queue.  trans_start is refreshed so the
 * netdev watchdog does not declare a spurious TX timeout while the
 * queue is intentionally stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
473
/* Counterpart of bnx2_netif_stop(): decrement intr_sem (incremented by
 * bnx2_disable_int_sync()) and, once it drops to zero, restart the TX
 * queue, NAPI polling and device interrupts.  Nested stop/start pairs
 * therefore only resume on the outermost start.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
485
486 static void
487 bnx2_free_mem(struct bnx2 *bp)
488 {
489         int i;
490
491         for (i = 0; i < bp->ctx_pages; i++) {
492                 if (bp->ctx_blk[i]) {
493                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
494                                             bp->ctx_blk[i],
495                                             bp->ctx_blk_mapping[i]);
496                         bp->ctx_blk[i] = NULL;
497                 }
498         }
499         if (bp->status_blk) {
500                 pci_free_consistent(bp->pdev, bp->status_stats_size,
501                                     bp->status_blk, bp->status_blk_mapping);
502                 bp->status_blk = NULL;
503                 bp->stats_blk = NULL;
504         }
505         if (bp->tx_desc_ring) {
506                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507                                     bp->tx_desc_ring, bp->tx_desc_mapping);
508                 bp->tx_desc_ring = NULL;
509         }
510         kfree(bp->tx_buf_ring);
511         bp->tx_buf_ring = NULL;
512         for (i = 0; i < bp->rx_max_ring; i++) {
513                 if (bp->rx_desc_ring[i])
514                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
515                                             bp->rx_desc_ring[i],
516                                             bp->rx_desc_mapping[i]);
517                 bp->rx_desc_ring[i] = NULL;
518         }
519         vfree(bp->rx_buf_ring);
520         bp->rx_buf_ring = NULL;
521         for (i = 0; i < bp->rx_max_pg_ring; i++) {
522                 if (bp->rx_pg_desc_ring[i])
523                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524                                             bp->rx_pg_desc_ring[i],
525                                             bp->rx_pg_desc_mapping[i]);
526                 bp->rx_pg_desc_ring[i] = NULL;
527         }
528         if (bp->rx_pg_ring)
529                 vfree(bp->rx_pg_ring);
530         bp->rx_pg_ring = NULL;
531 }
532
/* Allocate all host memory used by the device: software and DMA
 * descriptor rings for TX and RX, the combined status + statistics
 * block (per-vector status blocks when MSI-X is available), and the
 * 5709 context memory pages.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	/* vmalloc() does not zero; the shadow ring must start clean. */
	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* RX page rings are only used when rx_pg_ring_size is set. */
	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & MSIX_CAP_FLAG)
		/* One aligned status block slot per hardware MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* Vector 0 always uses the start of the status block area. */
	bp->bnx2_napi[0].status_blk = bp->status_blk;
	if (bp->flags & MSIX_CAP_FLAG) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

			/* Point each extra vector at its slot within the
			 * shared allocation.
			 */
			bnapi->status_blk = (void *)
				((unsigned long) bp->status_blk +
				 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			/* Vector number in bits 24+, as OR'd into the
			 * INT_ACK_CMD writes in bnx2_enable_int().
			 */
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block lives directly after the status block(s)
	 * in the same DMA allocation.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
631
/* Report the current link state to the bootcode/management firmware by
 * writing a BNX2_LINK_STATUS_* word into shared memory.  Skipped when
 * a remote PHY is in control, since the link is not ours to report.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Map speed + duplex onto the firmware status encoding. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR bits are latched; read twice so the second
			 * read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
690
691 static char *
692 bnx2_xceiver_str(struct bnx2 *bp)
693 {
694         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
696                  "Copper"));
697 }
698
/* Log the link state (speed, duplex, flow control), update the netdev
 * carrier state, and forward the state to firmware via
 * bnx2_report_fw_link().  The unprefixed printk() calls continue the
 * KERN_INFO line started above them.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
735
/* Resolve the negotiated flow-control state into bp->flow_ctrl.
 *
 * If speed or flow control was forced (autoneg not covering both), the
 * requested setting is applied directly on full duplex.  Otherwise the
 * local and remote pause advertisements are combined per the 802.3
 * pause-resolution rules; the 5708 SerDes reports the resolved state
 * in a status register instead.  Flow control is never enabled on half
 * duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: hardware already resolved pause; read it back. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000Base-X pause bits into the copper-style
	 * ADVERTISE_PAUSE_* encoding so one resolution path below
	 * handles both.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
811
/* Record link-up speed and duplex for the 5709 SerDes PHY.
 *
 * Selects the GP_STATUS register block, reads the autoneg status word,
 * then restores the COMBO_IEEEB0 block.  With autoneg disabled the
 * user-requested speed/duplex are taken as-is.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
850
851 static int
852 bnx2_5708s_linkup(struct bnx2 *bp)
853 {
854         u32 val;
855
856         bp->link_up = 1;
857         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859                 case BCM5708S_1000X_STAT1_SPEED_10:
860                         bp->line_speed = SPEED_10;
861                         break;
862                 case BCM5708S_1000X_STAT1_SPEED_100:
863                         bp->line_speed = SPEED_100;
864                         break;
865                 case BCM5708S_1000X_STAT1_SPEED_1G:
866                         bp->line_speed = SPEED_1000;
867                         break;
868                 case BCM5708S_1000X_STAT1_SPEED_2G5:
869                         bp->line_speed = SPEED_2500;
870                         break;
871         }
872         if (val & BCM5708S_1000X_STAT1_FD)
873                 bp->duplex = DUPLEX_FULL;
874         else
875                 bp->duplex = DUPLEX_HALF;
876
877         return 0;
878 }
879
880 static int
881 bnx2_5706s_linkup(struct bnx2 *bp)
882 {
883         u32 bmcr, local_adv, remote_adv, common;
884
885         bp->link_up = 1;
886         bp->line_speed = SPEED_1000;
887
888         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889         if (bmcr & BMCR_FULLDPLX) {
890                 bp->duplex = DUPLEX_FULL;
891         }
892         else {
893                 bp->duplex = DUPLEX_HALF;
894         }
895
896         if (!(bmcr & BMCR_ANENABLE)) {
897                 return 0;
898         }
899
900         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
902
903         common = local_adv & remote_adv;
904         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
905
906                 if (common & ADVERTISE_1000XFULL) {
907                         bp->duplex = DUPLEX_FULL;
908                 }
909                 else {
910                         bp->duplex = DUPLEX_HALF;
911                 }
912         }
913
914         return 0;
915 }
916
917 static int
918 bnx2_copper_linkup(struct bnx2 *bp)
919 {
920         u32 bmcr;
921
922         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
923         if (bmcr & BMCR_ANENABLE) {
924                 u32 local_adv, remote_adv, common;
925
926                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
927                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
928
929                 common = local_adv & (remote_adv >> 2);
930                 if (common & ADVERTISE_1000FULL) {
931                         bp->line_speed = SPEED_1000;
932                         bp->duplex = DUPLEX_FULL;
933                 }
934                 else if (common & ADVERTISE_1000HALF) {
935                         bp->line_speed = SPEED_1000;
936                         bp->duplex = DUPLEX_HALF;
937                 }
938                 else {
939                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
940                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
941
942                         common = local_adv & remote_adv;
943                         if (common & ADVERTISE_100FULL) {
944                                 bp->line_speed = SPEED_100;
945                                 bp->duplex = DUPLEX_FULL;
946                         }
947                         else if (common & ADVERTISE_100HALF) {
948                                 bp->line_speed = SPEED_100;
949                                 bp->duplex = DUPLEX_HALF;
950                         }
951                         else if (common & ADVERTISE_10FULL) {
952                                 bp->line_speed = SPEED_10;
953                                 bp->duplex = DUPLEX_FULL;
954                         }
955                         else if (common & ADVERTISE_10HALF) {
956                                 bp->line_speed = SPEED_10;
957                                 bp->duplex = DUPLEX_HALF;
958                         }
959                         else {
960                                 bp->line_speed = 0;
961                                 bp->link_up = 0;
962                         }
963                 }
964         }
965         else {
966                 if (bmcr & BMCR_SPEED100) {
967                         bp->line_speed = SPEED_100;
968                 }
969                 else {
970                         bp->line_speed = SPEED_10;
971                 }
972                 if (bmcr & BMCR_FULLDPLX) {
973                         bp->duplex = DUPLEX_FULL;
974                 }
975                 else {
976                         bp->duplex = DUPLEX_HALF;
977                 }
978         }
979
980         return 0;
981 }
982
/* Program the EMAC to match the current link state (bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl) and ack the link-change
 * interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX length parameters: 0x2620 is the default; 1G half-duplex
	 * apparently needs 0x26ff — value from Broadcom, confirm against
	 * the EMAC_TX_LENGTHS register spec.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		/* Both fall-throughs below are intentional: 10M on a 5706
		 * uses plain MII mode, and 2.5G is GMII plus the 25G bit.
		 */
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: leave the port interface in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1049
1050 static void
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1052 {
1053         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1054             (CHIP_NUM(bp) == CHIP_NUM_5709))
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_GP_STATUS);
1057 }
1058
1059 static void
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1061 {
1062         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1063             (CHIP_NUM(bp) == CHIP_NUM_5709))
1064                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1066 }
1067
1068 static int
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1070 {
1071         u32 up1;
1072         int ret = 1;
1073
1074         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1075                 return 0;
1076
1077         if (bp->autoneg & AUTONEG_SPEED)
1078                 bp->advertising |= ADVERTISED_2500baseX_Full;
1079
1080         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1082
1083         bnx2_read_phy(bp, bp->mii_up1, &up1);
1084         if (!(up1 & BCM5708S_UP1_2G5)) {
1085                 up1 |= BCM5708S_UP1_2G5;
1086                 bnx2_write_phy(bp, bp->mii_up1, up1);
1087                 ret = 0;
1088         }
1089
1090         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094         return ret;
1095 }
1096
1097 static int
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1099 {
1100         u32 up1;
1101         int ret = 0;
1102
1103         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1104                 return 0;
1105
1106         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1108
1109         bnx2_read_phy(bp, bp->mii_up1, &up1);
1110         if (up1 & BCM5708S_UP1_2G5) {
1111                 up1 &= ~BCM5708S_UP1_2G5;
1112                 bnx2_write_phy(bp, bp->mii_up1, up1);
1113                 ret = 1;
1114         }
1115
1116         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1119
1120         return ret;
1121 }
1122
1123 static void
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1125 {
1126         u32 bmcr;
1127
1128         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1129                 return;
1130
1131         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1132                 u32 val;
1133
1134                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1136                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1148         }
1149
1150         if (bp->autoneg & AUTONEG_SPEED) {
1151                 bmcr &= ~BMCR_ANENABLE;
1152                 if (bp->req_duplex == DUPLEX_FULL)
1153                         bmcr |= BMCR_FULLDPLX;
1154         }
1155         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1156 }
1157
1158 static void
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1160 {
1161         u32 bmcr;
1162
1163         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1164                 return;
1165
1166         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1167                 u32 val;
1168
1169                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1171                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1174
1175                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1178
1179         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1182         }
1183
1184         if (bp->autoneg & AUTONEG_SPEED)
1185                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1187 }
1188
/* Re-evaluate the PHY link state and reprogram the MAC accordingly.
 * Reports a link change to the log when the state flips.  Returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is unconditionally up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware-managed PHY: link changes arrive via remote PHY events,
	 * not by polling here.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* Read the status register twice: MII link status is latched, so
	 * the first read can report a stale link-down.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, override the PHY's link bit with the EMAC's
	 * own link status.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific speed/duplex resolution. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting so autoneg can
		 * restart cleanly on the next attempt.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1254
1255 static int
1256 bnx2_reset_phy(struct bnx2 *bp)
1257 {
1258         int i;
1259         u32 reg;
1260
1261         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1262
1263 #define PHY_RESET_MAX_WAIT 100
1264         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1265                 udelay(10);
1266
1267                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1268                 if (!(reg & BMCR_RESET)) {
1269                         udelay(20);
1270                         break;
1271                 }
1272         }
1273         if (i == PHY_RESET_MAX_WAIT) {
1274                 return -EBUSY;
1275         }
1276         return 0;
1277 }
1278
1279 static u32
1280 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1281 {
1282         u32 adv = 0;
1283
1284         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1285                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1286
1287                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1288                         adv = ADVERTISE_1000XPAUSE;
1289                 }
1290                 else {
1291                         adv = ADVERTISE_PAUSE_CAP;
1292                 }
1293         }
1294         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1295                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1296                         adv = ADVERTISE_1000XPSE_ASYM;
1297                 }
1298                 else {
1299                         adv = ADVERTISE_PAUSE_ASYM;
1300                 }
1301         }
1302         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1303                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1304                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1305                 }
1306                 else {
1307                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1308                 }
1309         }
1310         return adv;
1311 }
1312
1313 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1314
1315 static int
1316 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1317 {
1318         u32 speed_arg = 0, pause_adv;
1319
1320         pause_adv = bnx2_phy_get_pause_adv(bp);
1321
1322         if (bp->autoneg & AUTONEG_SPEED) {
1323                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1324                 if (bp->advertising & ADVERTISED_10baseT_Half)
1325                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1326                 if (bp->advertising & ADVERTISED_10baseT_Full)
1327                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1328                 if (bp->advertising & ADVERTISED_100baseT_Half)
1329                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1330                 if (bp->advertising & ADVERTISED_100baseT_Full)
1331                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1332                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1333                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1334                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1335                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1336         } else {
1337                 if (bp->req_line_speed == SPEED_2500)
1338                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1339                 else if (bp->req_line_speed == SPEED_1000)
1340                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1341                 else if (bp->req_line_speed == SPEED_100) {
1342                         if (bp->req_duplex == DUPLEX_FULL)
1343                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1344                         else
1345                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1346                 } else if (bp->req_line_speed == SPEED_10) {
1347                         if (bp->req_duplex == DUPLEX_FULL)
1348                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1349                         else
1350                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1351                 }
1352         }
1353
1354         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1355                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1356         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1357                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1358
1359         if (port == PORT_TP)
1360                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1361                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1362
1363         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1364
1365         spin_unlock_bh(&bp->phy_lock);
1366         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1367         spin_lock_bh(&bp->phy_lock);
1368
1369         return 0;
1370 }
1371
/* Configure a directly-managed SerDes PHY according to bp->autoneg,
 * bp->advertising and bp->req_*.  Delegates to bnx2_setup_remote_phy()
 * when the PHY is firmware-managed.  Called with phy_lock held (dropped
 * briefly around the forced link-down sleep).  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggle 2.5G capability to match the requested speed; if
		 * the setting had to change, force the link down so the
		 * partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific 2.5G force bits. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 looks like
				 * BMCR_SPEED1000's low speed-select bit on
				 * this PHY — confirm against the 5709 docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve flow control
			 * and reprogram the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1486
/* Ethtool advertisement mask for fibre: all speeds, including 2.5G only
 * when the PHY is capable.  NOTE: expands using the local variable 'bp'.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask for copper: all 10/100/1000 speeds. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
	ADVERTISED_1000baseT_Full)

/* MII register advertisement bit groups. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1501
/* Load the default link settings published by the firmware for a
 * remote (firmware-managed) PHY into bp->autoneg, bp->advertising and
 * bp->req_*.  Defaults are stored per media type in shared memory.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg default: translate each enabled speed bit into
		 * the corresponding ethtool advertisement flag.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced default: speeds are checked in ascending order,
		 * so the highest enabled speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1548
1549 static void
1550 bnx2_set_default_link(struct bnx2 *bp)
1551 {
1552         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1553                 return bnx2_set_default_remote_link(bp);
1554
1555         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1556         bp->req_line_speed = 0;
1557         if (bp->phy_flags & PHY_SERDES_FLAG) {
1558                 u32 reg;
1559
1560                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1561
1562                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1563                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1564                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1565                         bp->autoneg = 0;
1566                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1567                         bp->req_duplex = DUPLEX_FULL;
1568                 }
1569         } else
1570                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1571 }
1572
1573 static void
1574 bnx2_send_heart_beat(struct bnx2 *bp)
1575 {
1576         u32 msg;
1577         u32 addr;
1578
1579         spin_lock(&bp->indirect_lock);
1580         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1581         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1582         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1583         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1584         spin_unlock(&bp->indirect_lock);
1585 }
1586
/* Handle a link event from the firmware-managed (remote) PHY: decode
 * the shared-memory link status word into bp->link_up, line_speed,
 * duplex, flow_ctrl and phy_port, then reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware wants a heartbeat; answer before decoding the event. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case sets half duplex and deliberately falls
		 * through to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* If either speed or flow-control autoneg is off, use the
		 * requested flow control (full duplex only); otherwise take
		 * the negotiated result reported by the firmware.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Track media changes and reload defaults when the active
		 * port type flips between fibre and copper.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1665
1666 static int
1667 bnx2_set_remote_link(struct bnx2 *bp)
1668 {
1669         u32 evt_code;
1670
1671         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1672         switch (evt_code) {
1673                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1674                         bnx2_remote_phy_event(bp);
1675                         break;
1676                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1677                 default:
1678                         bnx2_send_heart_beat(bp);
1679                         break;
1680         }
1681         return 0;
1682 }
1683
/* Program the copper PHY for the current bp->autoneg / req_line_speed /
 * req_duplex settings.
 *
 * With AUTONEG_SPEED set, the advertisement registers are rewritten and
 * autoneg restarted only when something actually changed; otherwise only
 * the flow-control / MAC state is refreshed.  Without autoneg, BMCR is
 * forced to the requested speed/duplex.  Always returns 0.
 *
 * NOTE(review): bp->phy_lock appears to be held by the caller — the
 * forced-speed path drops and re-takes it around msleep(); confirm
 * against callers.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage when comparing old vs. new. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate the ethtool advertising mask into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite advertisements and restart autoneg only if the
		 * registers changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced-speed path: build the new BMCR value. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1780
1781 static int
1782 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1783 {
1784         if (bp->loopback == MAC_LOOPBACK)
1785                 return 0;
1786
1787         if (bp->phy_flags & PHY_SERDES_FLAG) {
1788                 return (bnx2_setup_serdes_phy(bp, port));
1789         }
1790         else {
1791                 return (bnx2_setup_copper_phy(bp));
1792         }
1793 }
1794
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S uses block-addressed MII registers; the standard IEEE
 * registers live at an offset of 0x10, so the bp->mii_* shadow offsets
 * are redirected first.  The sequence then selects the AN MMD, resets
 * the PHY, forces fiber mode (no media auto-detect), sets 2.5G
 * advertisement to match capability, and enables BAM/CL73 next-page
 * autonegotiation.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers are offset by 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Address the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode instead of media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the device is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg mode (BAM) with T2 next pages. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1843
/* Initialize the 5708 SerDes PHY.
 *
 * Resets the PHY, selects the IEEE register mapping, enables 1000X
 * fiber mode with signal auto-detect and parallel (PLL) detection, and
 * advertises 2.5G when capable.  Early chip steppings (A0/B0/B1) get a
 * TX amplitude adjustment, and backplane boards apply an NVRAM-supplied
 * TX control value.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Select the IEEE register mapping (DIG_3_0_USE_IEEE). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Enable fiber mode with signal auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel (PLL) link detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G via the over-1G "UP1" register when capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from the NVRAM port config,
	 * applied only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1901
/* Initialize the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag, applies a GP HW control setting on
 * the 5706, and programs jumbo/standard packet-length handling through
 * vendor-specific registers 0x18 and 0x1c (magic values taken from the
 * Broadcom init sequence).  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length setting for standard MTU */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1938
/* Initialize the copper (twisted-pair) PHY.
 *
 * Applies the CRC-fix and early-DAC-disable workarounds when flagged,
 * programs jumbo/standard extended packet length through the
 * vendor-specific registers 0x18/0x1c/0x10 (magic values kept from the
 * Broadcom init sequence), and enables ethernet@wirespeed.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* Vendor-specific register write sequence applied only on PHYs
	 * flagged with the CRC erratum.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 ("disable early DAC",
	 * per the flag name).
	 */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bit for standard MTU */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1989
1990
/* Top-level PHY initialization.
 *
 * Selects link-ready interrupt mode, resets the MII shadow register
 * offsets to the IEEE defaults (the 5709S init overrides them), enables
 * the EMAC link attention, reads the PHY ID, then dispatches to the
 * SerDes or copper init routine for this chip.  Firmware-managed PHYs
 * (REMOTE_PHY_CAP_FLAG) skip the local init entirely.  Finally applies
 * the current link settings via bnx2_setup_phy().
 * Returns 0 on success or the first non-zero error code.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* IEEE-standard MII register offsets; 5709S init changes these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	/* Generate EMAC attentions on link state changes only. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2034
2035 static int
2036 bnx2_set_mac_loopback(struct bnx2 *bp)
2037 {
2038         u32 mac_mode;
2039
2040         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2041         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2042         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2043         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2044         bp->link_up = 1;
2045         return 0;
2046 }
2047
2048 static int bnx2_test_link(struct bnx2 *);
2049
2050 static int
2051 bnx2_set_phy_loopback(struct bnx2 *bp)
2052 {
2053         u32 mac_mode;
2054         int rc, i;
2055
2056         spin_lock_bh(&bp->phy_lock);
2057         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2058                             BMCR_SPEED1000);
2059         spin_unlock_bh(&bp->phy_lock);
2060         if (rc)
2061                 return rc;
2062
2063         for (i = 0; i < 10; i++) {
2064                 if (bnx2_test_link(bp) == 0)
2065                         break;
2066                 msleep(100);
2067         }
2068
2069         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2070         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2071                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2072                       BNX2_EMAC_MODE_25G_MODE);
2073
2074         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2075         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2076         bp->link_up = 1;
2077         return 0;
2078 }
2079
/* Post @msg_data to firmware through the DRV_MB mailbox and wait for an
 * acknowledgement.
 *
 * A rolling sequence number (fw_wr_seq) is ORed into the message so the
 * firmware ack can be matched to this request.  The ack is polled for
 * up to FW_ACK_TIME_OUT_MS.  DRV_MSG_DATA_WAIT0 messages are
 * fire-and-forget: 0 is returned after the poll without checking the
 * ack or status.  On timeout a FW_TIMEOUT code is posted back to the
 * firmware and -EBUSY returned (logged unless @silent); a bad firmware
 * status yields -EIO.  Returns 0 on success.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2122
/* Initialize the 5709 context memory via the host page table.
 *
 * Kicks off the hardware context-memory init (host page size encoded in
 * CTX_COMMAND; NOTE(review): the purpose of bit 12 is not visible
 * here), polls for MEM_INIT to self-clear, then registers the DMA
 * address of every host context page, polling until each WRITE_REQ is
 * accepted.  Returns 0 on success or -EBUSY if a poll times out.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* page size, log2 minus 8 */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Write the split 64-bit page address, then trigger the
		 * page-table entry write for index i.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2165
/* Zero the on-chip context memory for all 96 CIDs (pre-5709 chips).
 *
 * On 5706 A0, contexts with CID bit 3 set are remapped into a spare
 * region (0x60-based) before zeroing — presumably a hardware erratum
 * workaround; confirm against the chip errata.
 * NOTE(review): the "+=" accumulation of (i << PHY_CTX_SHIFT) in the
 * inner loop is only correct when CTX_SIZE / PHY_CTX_SIZE <= 2 —
 * confirm the ratio in bnx2.h.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;	/* number of context IDs to clear */
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2208
/* Work around bad on-chip RX mbuf memory.
 *
 * Drains the firmware RX mbuf allocator: clusters whose address has
 * bit 9 set live in known-bad memory and are deliberately never freed,
 * while all good clusters are handed back to the pool afterwards.
 * Returns 0 on success or -ENOMEM.
 * NOTE(review): good_mbuf[] holds 512 entries and the loop does no
 * bounds check — assumes the hardware free pool never exceeds that;
 * confirm against the chip documentation.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* NOTE(review): the free command encodes the cluster value
		 * twice plus a low bit; format kept as-is from the original.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2259
2260 static void
2261 bnx2_set_mac_addr(struct bnx2 *bp)
2262 {
2263         u32 val;
2264         u8 *mac_addr = bp->dev->dev_addr;
2265
2266         val = (mac_addr[0] << 8) | mac_addr[1];
2267
2268         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2269
2270         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2271                 (mac_addr[4] << 8) | mac_addr[5];
2272
2273         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2274 }
2275
2276 static inline int
2277 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2278 {
2279         dma_addr_t mapping;
2280         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2281         struct rx_bd *rxbd =
2282                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2283         struct page *page = alloc_page(GFP_ATOMIC);
2284
2285         if (!page)
2286                 return -ENOMEM;
2287         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2288                                PCI_DMA_FROMDEVICE);
2289         rx_pg->page = page;
2290         pci_unmap_addr_set(rx_pg, mapping, mapping);
2291         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2292         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2293         return 0;
2294 }
2295
2296 static void
2297 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2298 {
2299         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2300         struct page *page = rx_pg->page;
2301
2302         if (!page)
2303                 return;
2304
2305         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2306                        PCI_DMA_FROMDEVICE);
2307
2308         __free_page(page);
2309         rx_pg->page = NULL;
2310 }
2311
/* Allocate, align, and DMA-map a new rx skb for ring slot @index and
 * attach its mapping to the matching rx_bd.  Also advances the running
 * producer byte-sequence counter (rx_prod_bseq) by the buffer size.
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Publish the split 64-bit DMA address in the descriptor. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2342
2343 static int
2344 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2345 {
2346         struct status_block *sblk = bnapi->status_blk;
2347         u32 new_link_state, old_link_state;
2348         int is_set = 1;
2349
2350         new_link_state = sblk->status_attn_bits & event;
2351         old_link_state = sblk->status_attn_bits_ack & event;
2352         if (new_link_state != old_link_state) {
2353                 if (new_link_state)
2354                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2355                 else
2356                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2357         } else
2358                 is_set = 0;
2359
2360         return is_set;
2361 }
2362
2363 static void
2364 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2365 {
2366         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2367                 spin_lock(&bp->phy_lock);
2368                 bnx2_set_link(bp);
2369                 spin_unlock(&bp->phy_lock);
2370         }
2371         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2372                 bnx2_set_remote_link(bp);
2373
2374 }
2375
2376 static inline u16
2377 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2378 {
2379         u16 cons;
2380
2381         cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2382
2383         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2384                 cons++;
2385         return cons;
2386 }
2387
/* Reclaim tx buffers completed by the hardware (runs from NAPI poll).
 *
 * Walks from the software consumer to the hardware consumer, unmapping
 * and freeing each completed skb, then wakes the tx queue if it was
 * stopped and enough descriptors have been freed.
 */
static void
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index just past this packet's full BD chain. */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;	/* chain crosses a ring page */
			}
			/* Defer if the packet's final BD is not done yet. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD that follows the header BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-sample; more completions may have arrived meanwhile. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with start_xmit. */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2467
/* Recycle @count rx page-ring entries starting at the current page
 * consumer back to the producer side, without allocating new pages.
 *
 * If @skb is non-NULL, its last page fragment is detached, re-mapped
 * for DMA, and installed into the first consumer slot before the skb
 * itself is freed — used by error paths to reclaim pages from a
 * partially assembled skb.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bnapi->rx_pg_prod, prod;
	u16 cons = bnapi->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Steal the last fragment page back from the skb and
			 * put it into the consumer slot before freeing.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page and its mapping from the consumer
			 * slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bnapi->rx_pg_prod = hw_prod;
	bnapi->rx_pg_cons = cons;
}
2517
/* Put an RX skb back on the ring at @prod, reusing the DMA mapping that
 * was set up for the buffer at @cons.  Used when the packet was copied
 * or dropped, so the original buffer can be recycled without a new
 * allocation.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Only the header area was synced to the CPU in bnx2_rx_int();
	 * give that region back to the device before reuse.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing to move. */
	if (cons == prod)
		return;

	/* Transfer the DMA mapping and hardware descriptor address from
	 * the consumer slot to the producer slot.
	 */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2547
/* Finish receiving one skb whose linear data is already DMA-mapped.
 * Replenishes the ring slot, unmaps the buffer, and for split/jumbo
 * frames attaches page-ring fragments to the skb.  @ring_idx packs the
 * consumer index in the high 16 bits and the producer index in the low
 * 16 bits.  Returns 0 on success or a negative errno; on failure the
 * buffers are recycled back onto the rings and the skb is not consumed
 * by the caller's normal path.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
	if (unlikely(err)) {
		/* No replacement buffer: recycle this one, and any page
		 * fragments the hardware used for the payload.
		 */
		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* +4 accounts for the trailing frame CRC the
			 * hardware places in the buffer.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Non-split frame: everything is in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bnapi->rx_pg_cons;
		u16 pg_prod = bnapi->rx_pg_prod;

		/* Payload beyond hdr_len lives in page-ring buffers;
		 * +4 again covers the trailing CRC.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The remaining bytes are all (or mostly)
				 * CRC.  Trim the already-attached data back
				 * by the CRC bytes that leaked in, recycle
				 * the unused pages, and finish.
				 */
				unsigned int tail = 4 - frag_len;

				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Strip the 4-byte CRC from the final fragment. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Could not replenish: give the remaining
				 * pages (and this skb) back to the ring.
				 */
				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bnapi->rx_pg_prod = pg_prod;
		bnapi->rx_pg_cons = pg_cons;
	}
	return 0;
}
2639
2640 static inline u16
2641 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2642 {
2643         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2644
2645         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2646                 cons++;
2647         return cons;
2648 }
2649
/* NAPI RX handler: drain completed RX descriptors, up to @budget
 * packets.  Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bnapi->rx_cons;
	sw_prod = bnapi->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area to the CPU so the
		 * l2_fhdr (and a possible short-packet copy) can be read.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The hardware prepends an l2_fhdr status structure to
		 * the packet data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Drop frames the hardware flagged as errored; recycle
		 * the buffer in place.
		 */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		/* Header/payload split or jumbo frame: the payload
		 * continues in the page ring.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4-byte frame CRC appended by the hardware. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small packet: copy into a fresh skb and recycle
			 * the ring buffer, avoiding an unmap/remap cycle.
			 */
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, bnapi, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversize frames unless they are VLAN-tagged
		 * (ethertype 0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when it verified a
		 * TCP segment or UDP datagram without checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bnapi->rx_cons = sw_cons;
	bnapi->rx_prod = sw_prod;

	/* Tell the hardware how far we have consumed/produced via the
	 * context mailbox registers.
	 */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bnapi->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

	/* Flush the posted MMIO writes before returning. */
	mmiowb();

	return rx_pkt;

}
2795
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	prefetch(bnapi->status_blk);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2819
2820 static irqreturn_t
2821 bnx2_msi_1shot(int irq, void *dev_instance)
2822 {
2823         struct net_device *dev = dev_instance;
2824         struct bnx2 *bp = netdev_priv(dev);
2825         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2826
2827         prefetch(bnapi->status_blk);
2828
2829         /* Return here if interrupt is disabled. */
2830         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2831                 return IRQ_HANDLED;
2832
2833         netif_rx_schedule(dev, &bnapi->napi);
2834
2835         return IRQ_HANDLED;
2836 }
2837
/* INTx (shared line) ISR: verify the interrupt is really ours, mask
 * further interrupts, and schedule NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the index we are servicing only if NAPI was not
	 * already scheduled, then hand off to the poll routine.
	 */
	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2877
/* Attention bits that require servicing: link-state changes and
 * timer-abort events.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
2880
2881 static inline int
2882 bnx2_has_work(struct bnx2_napi *bnapi)
2883 {
2884         struct bnx2 *bp = bnapi->bp;
2885         struct status_block *sblk = bp->status_blk;
2886
2887         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2888             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2889                 return 1;
2890
2891         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2892             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2893                 return 1;
2894
2895         return 0;
2896 }
2897
/* Do one round of NAPI work for @bnapi: handle attention events, reap
 * TX completions, and receive up to (budget - work_done) packets.
 * Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Attention bits differing from their ack copies indicate an
	 * unserviced link or timer-abort event.
	 */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
		bnx2_tx_int(bp, bnapi);

	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
2926
/* NAPI poll callback: repeatedly service work until either the budget
 * is exhausted (stay scheduled) or no work remains (complete NAPI and
 * re-enable interrupts).  Returns the number of RX packets processed.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI/MSI-X: a single unmasking ack is enough. */
			if (likely(bp->flags & USING_MSI_OR_MSIX_FLAG)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with the mask bit still set,
			 * then a second write to unmask.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
2968
2969 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2970  * from set_multicast.
2971  */
/* Program the chip's RX filtering (promiscuous / all-multi / multicast
 * hash and VLAN tag keeping) from dev->flags and the multicast list.
 * Called with rtnl_lock from the vlan functions and with netif_tx_lock
 * from set_multicast (see comment above the original definition).
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in hardware unless VLAN accel is active or the
	 * management firmware (ASF) needs to see them.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address with CRC32; the low 8 bits select
		 * one of 256 filter bits (8 registers x 32 bits).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3043
/* Load RV2P processor microcode.  Instructions are 64-bit pairs, hence
 * the stride of 8 bytes: the high word and low word are written to
 * separate registers, then committed to instruction memory at index
 * i/8 with a read/write command for the selected processor.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* NOTE(review): cpu_to_le32() on a value passed to
		 * REG_WR looks suspect on big-endian hosts (REG_WR
		 * normally handles byte order itself) -- confirm
		 * against the REG_WR implementation in bnx2.h.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3076
/* Load firmware into one on-chip CPU: halt it, copy the text (gzip
 * compressed in @fw), data, rodata sections and zero sbss/bss into its
 * scratchpad, set the program counter, and restart it.  Returns 0 on
 * success or a negative errno from decompression.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area.  Section addresses are in the CPU's MIPS
	 * view; translate them into scratchpad offsets.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		/* NOTE(review): cpu_to_le32() here but not for the data
		 * and rodata sections below -- inconsistent on
		 * big-endian; verify which form matches REG_WR_IND's
		 * byte-order handling.
		 */
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
3158
/* Load firmware into all on-chip processors: the two RV2P engines plus
 * the RX, TX, TX patch-up, completion, and command CPUs.  A single
 * scratch buffer is reused to hold each decompressed firmware image
 * (5709 chips use a different firmware set than 5706/5708).
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	/* zlib_inflate_blob() returns the decompressed length. */
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* Point the firmware descriptor at the shared scratch buffer
	 * that load_cpu_fw() decompresses into.
	 */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);

init_cpu_err:
	vfree(text);
	return rc;
}
3319
/* Transition the chip between PCI power states.
 *
 * PCI_D0: clears the PM state bits in PMCSR (with the spec-required
 * settle delay when leaving D3hot), strips the magic-packet mode bit
 * from the EMAC and disables the RPM ACPI pattern matcher.
 *
 * PCI_D3hot: if Wake-on-LAN is enabled, reprograms the link (forcing
 * 10/100 autoneg on copper), arms magic/ACPI packet reception,
 * accepts all multicast, keeps the EMAC and RPM blocks powered,
 * notifies the firmware of the chosen suspend mode, and finally
 * writes PMCSR.  No register access is allowed after that write
 * until the device is brought back to D0.
 *
 * Returns 0 on success, -EINVAL for any unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        /* Current power-management control/status for read-modify-write. */
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Clear the state field (-> D0) and the PME status bit
                 * (write-one-to-clear). */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Ack any received magic/ACPI packets and drop out of
                 * magic-packet (WoL) mode. */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                /* Disable the ACPI pattern matcher in the receive path. */
                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Save autoneg settings; they are temporarily
                         * overridden to bring the link down to 10/100 for
                         * low-power WoL on copper. */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        /* Restore the user-configured settings so they
                         * survive the suspend. */
                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Sort rule: our MAC (entry 1) plus broadcast and
                         * multicast.  Written twice (0 then val) before
                         * setting the enable bit, matching the programming
                         * sequence used elsewhere in the driver. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the firmware which suspend mode we chose, unless
                 * this board cannot do WoL at all. */
                if (!(bp->flags & NO_WOL_FLAG))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                /* 5706 A0/A1: only enter D3hot (state value 3) when WoL is
                 * enabled -- presumably a limitation of these early
                 * steppings; TODO confirm against errata.  All other chips
                 * always set D3hot. */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
3456
3457 static int
3458 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3459 {
3460         u32 val;
3461         int j;
3462
3463         /* Request access to the flash interface. */
3464         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3465         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3466                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3467                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3468                         break;
3469
3470                 udelay(5);
3471         }
3472
3473         if (j >= NVRAM_TIMEOUT_COUNT)
3474                 return -EBUSY;
3475
3476         return 0;
3477 }
3478
3479 static int
3480 bnx2_release_nvram_lock(struct bnx2 *bp)
3481 {
3482         int j;
3483         u32 val;
3484
3485         /* Relinquish nvram interface. */
3486         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3487
3488         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3489                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3490                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3491                         break;
3492
3493                 udelay(5);
3494         }
3495
3496         if (j >= NVRAM_TIMEOUT_COUNT)
3497                 return -EBUSY;
3498
3499         return 0;
3500 }
3501
3502
3503 static int
3504 bnx2_enable_nvram_write(struct bnx2 *bp)
3505 {
3506         u32 val;
3507
3508         val = REG_RD(bp, BNX2_MISC_CFG);
3509         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3510
3511         if (bp->flash_info->flags & BNX2_NV_WREN) {
3512                 int j;
3513
3514                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3515                 REG_WR(bp, BNX2_NVM_COMMAND,
3516                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3517
3518                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3519                         udelay(5);
3520
3521                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3522                         if (val & BNX2_NVM_COMMAND_DONE)
3523                                 break;
3524                 }
3525
3526                 if (j >= NVRAM_TIMEOUT_COUNT)
3527                         return -EBUSY;
3528         }
3529         return 0;
3530 }
3531
3532 static void
3533 bnx2_disable_nvram_write(struct bnx2 *bp)
3534 {
3535         u32 val;
3536
3537         val = REG_RD(bp, BNX2_MISC_CFG);
3538         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3539 }
3540
3541
3542 static void
3543 bnx2_enable_nvram_access(struct bnx2 *bp)
3544 {
3545         u32 val;
3546
3547         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3548         /* Enable both bits, even on read. */
3549         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3550                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3551 }
3552
3553 static void
3554 bnx2_disable_nvram_access(struct bnx2 *bp)
3555 {
3556         u32 val;
3557
3558         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3559         /* Disable both bits, even after read. */
3560         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3561                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3562                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3563 }
3564
3565 static int
3566 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3567 {
3568         u32 cmd;
3569         int j;
3570
3571         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3572                 /* Buffered flash, no erase needed */
3573                 return 0;
3574
3575         /* Build an erase command */
3576         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3577               BNX2_NVM_COMMAND_DOIT;
3578
3579         /* Need to clear DONE bit separately. */
3580         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3581
3582         /* Address of the NVRAM to read from. */
3583         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3584
3585         /* Issue an erase command. */
3586         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3587
3588         /* Wait for completion. */
3589         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3590                 u32 val;
3591
3592                 udelay(5);
3593
3594                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3595                 if (val & BNX2_NVM_COMMAND_DONE)
3596                         break;
3597         }
3598
3599         if (j >= NVRAM_TIMEOUT_COUNT)
3600                 return -EBUSY;
3601
3602         return 0;
3603 }
3604
3605 static int
3606 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3607 {
3608         u32 cmd;
3609         int j;
3610
3611         /* Build the command word. */
3612         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3613
3614         /* Calculate an offset of a buffered flash, not needed for 5709. */
3615         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3616                 offset = ((offset / bp->flash_info->page_size) <<
3617                            bp->flash_info->page_bits) +
3618                           (offset % bp->flash_info->page_size);
3619         }
3620
3621         /* Need to clear DONE bit separately. */
3622         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3623
3624         /* Address of the NVRAM to read from. */
3625         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3626
3627         /* Issue a read command. */
3628         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3629
3630         /* Wait for completion. */
3631         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3632                 u32 val;
3633
3634                 udelay(5);
3635
3636                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3637                 if (val & BNX2_NVM_COMMAND_DONE) {
3638                         val = REG_RD(bp, BNX2_NVM_READ);
3639
3640                         val = be32_to_cpu(val);
3641                         memcpy(ret_val, &val, 4);
3642                         break;
3643                 }
3644         }
3645         if (j >= NVRAM_TIMEOUT_COUNT)
3646                 return -EBUSY;
3647
3648         return 0;
3649 }
3650
3651
3652 static int
3653 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3654 {
3655         u32 cmd, val32;
3656         int j;
3657
3658         /* Build the command word. */
3659         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3660
3661         /* Calculate an offset of a buffered flash, not needed for 5709. */
3662         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3663                 offset = ((offset / bp->flash_info->page_size) <<
3664                           bp->flash_info->page_bits) +
3665                          (offset % bp->flash_info->page_size);
3666         }
3667
3668         /* Need to clear DONE bit separately. */
3669         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3670
3671         memcpy(&val32, val, 4);
3672         val32 = cpu_to_be32(val32);
3673
3674         /* Write the data. */
3675         REG_WR(bp, BNX2_NVM_WRITE, val32);
3676
3677         /* Address of the NVRAM to write to. */
3678         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3679
3680         /* Issue the write command. */
3681         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3682
3683         /* Wait for completion. */
3684         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3685                 udelay(5);
3686
3687                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3688                         break;
3689         }
3690         if (j >= NVRAM_TIMEOUT_COUNT)
3691                 return -EBUSY;
3692
3693         return 0;
3694 }
3695
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * 5709 chips always use the fixed flash_5709 descriptor.  Older chips
 * are matched against flash_table by strap bits read from NVM_CFG1:
 * if bit 30 is set the interface was already reconfigured and the
 * backup-strap bits are compared against each entry's config1;
 * otherwise the raw strapping is compared and, on a match, the NVM
 * config registers are programmed from the table entry (under the
 * NVRAM lock).
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or a
 * negative errno from lock acquisition.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc = 0;
        struct flash_spec *flash;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->flash_info = &flash_5709;
                goto get_flash_size;
        }

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = ARRAY_SIZE(flash_table);

        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                /* Bit 23 selects which strap field is valid. */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                        j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        /* Either loop falls through with j == entry_count when no
         * table entry matched the straps. */
        if (j == entry_count) {
                bp->flash_info = NULL;
                printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
                return -ENODEV;
        }

get_flash_size:
        /* Prefer the size published by firmware in shared memory;
         * fall back to the table entry's total size if it is zero. */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
3778
3779 static int
3780 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3781                 int buf_size)
3782 {
3783         int rc = 0;
3784         u32 cmd_flags, offset32, len32, extra;
3785
3786         if (buf_size == 0)
3787                 return 0;
3788
3789         /* Request access to the flash interface. */
3790         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3791                 return rc;
3792
3793         /* Enable access to flash interface */
3794         bnx2_enable_nvram_access(bp);
3795
3796         len32 = buf_size;
3797         offset32 = offset;
3798         extra = 0;
3799
3800         cmd_flags = 0;
3801
3802         if (offset32 & 3) {
3803                 u8 buf[4];
3804                 u32 pre_len;
3805
3806                 offset32 &= ~3;
3807                 pre_len = 4 - (offset & 3);
3808
3809                 if (pre_len >= len32) {
3810                         pre_len = len32;
3811                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3812                                     BNX2_NVM_COMMAND_LAST;
3813                 }
3814                 else {
3815                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3816                 }
3817
3818                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3819
3820                 if (rc)
3821                         return rc;
3822
3823                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3824
3825                 offset32 += 4;
3826                 ret_buf += pre_len;
3827                 len32 -= pre_len;
3828         }
3829         if (len32 & 3) {
3830                 extra = 4 - (len32 & 3);
3831                 len32 = (len32 + 4) & ~3;
3832         }
3833
3834         if (len32 == 4) {
3835                 u8 buf[4];
3836
3837                 if (cmd_flags)
3838                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3839                 else
3840                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3841                                     BNX2_NVM_COMMAND_LAST;
3842
3843                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3844
3845                 memcpy(ret_buf, buf, 4 - extra);
3846         }
3847         else if (len32 > 0) {
3848                 u8 buf[4];
3849
3850                 /* Read the first word. */
3851                 if (cmd_flags)
3852                         cmd_flags = 0;
3853                 else
3854                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3855
3856                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3857
3858                 /* Advance to the next dword. */
3859                 offset32 += 4;
3860                 ret_buf += 4;
3861                 len32 -= 4;
3862
3863                 while (len32 > 4 && rc == 0) {
3864                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3865
3866                         /* Advance to the next dword. */
3867                         offset32 += 4;
3868                         ret_buf += 4;
3869                         len32 -= 4;
3870                 }
3871
3872                 if (rc)
3873                         return rc;
3874
3875                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3876                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3877
3878                 memcpy(ret_buf, buf, 4 - extra);
3879         }
3880
3881         /* Disable access to flash interface */
3882         bnx2_disable_nvram_access(bp);
3883
3884         bnx2_release_nvram_lock(bp);
3885
3886         return rc;
3887 }
3888
/* Write buf_size bytes from data_buf to NVRAM at byte offset 'offset'.
 *
 * The request is first widened to dword-aligned bounds: partial dwords
 * at either end are pre-read via bnx2_nvram_read() and merged with the
 * caller's data into a kmalloc'd align_buf.  The write then proceeds a
 * flash page at a time; for non-buffered parts each page is read into
 * flash_buffer, erased, and rewritten (bytes outside the target range
 * are restored from the saved page image).
 *
 * Returns 0 on success or a negative errno (-ENOMEM, lock/command
 * failures from the NVRAM helpers).
 *
 * NOTE(review): inside the per-page loop, error paths between
 * bnx2_acquire_nvram_lock() and bnx2_release_nvram_lock() jump
 * straight to nvram_write_end without disabling access or releasing
 * the NVRAM lock -- the lock stays held on failure.  Verify whether
 * this is intentional before relying on error recovery here.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        /* Unaligned start: widen to the dword boundary and pre-read the
         * first dword so untouched bytes can be preserved. */
        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        /* Unaligned end: widen to a whole dword and pre-read the last
         * dword for the same reason. */
        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        /* Merge the preserved edge bytes with the caller's data into a
         * single aligned scratch buffer. */
        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        /* Non-buffered flash needs a page-sized bounce buffer for the
         * read/erase/rewrite cycle.  264 bytes -- presumably the largest
         * page size in flash_table; TODO confirm. */
        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        /* One flash page per iteration. */
        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        /* Restore the bytes before the target range from
                         * the saved page image. */
                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end-4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        /* kfree(NULL) is a no-op, so these are safe on every path. */
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
4068
/* Probe firmware for remote-PHY capability and record the result.
 *
 * Only meaningful on SerDes boards.  Reads the firmware capability
 * mailbox; when the signature is valid and the remote-PHY bit is set,
 * sets REMOTE_PHY_CAP_FLAG, derives the active port type (fibre vs.
 * copper) from the firmware link status, and -- if the interface is
 * running -- mirrors the firmware's link state into the netdev carrier
 * and acknowledges the capability back to firmware.
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
        u32 val;

        /* Default: no remote PHY; feature applies to SerDes only. */
        bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
        if (!(bp->phy_flags & PHY_SERDES_FLAG))
                return;

        /* Firmware publishes capabilities behind a signature check. */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
        if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
                return;

        if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
                bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

                /* Firmware's view of the link decides the port type. */
                val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
                if (val & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (netif_running(bp->dev)) {
                        u32 sig;

                        /* Sync carrier state with firmware link status. */
                        if (val & BNX2_LINK_STATUS_LINK_UP) {
                                bp->link_up = 1;
                                netif_carrier_on(bp->dev);
                        } else {
                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                        }
                        /* Tell firmware the driver accepts remote-PHY
                         * operation. */
                        sig = BNX2_DRV_ACK_CAP_SIGNATURE |
                              BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
                        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
                                   sig);
                }
        }
}
4108
/* Map the MSI-X table and PBA into GRC windows 2 and 3.
 *
 * Separate-window mode is enabled first so each window can be pointed
 * at its own target address.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4117
/* Reset the chip core and resynchronize with the bootcode firmware.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value handed to the firmware so it
 *              knows the reason for the reset.
 *
 * The sequence (quiesce DMA, firmware WAIT0 handshake, reset signature,
 * core reset, WAIT1 handshake) is order-dependent; do not reorder.
 * Returns 0 on success or a negative errno on failure.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through the MISC command register; the
		 * read flushes the posted write before the delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset through PCICFG_MISC_CONFIG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY state under the PHY lock; if the port type
	 * changed across the reset, reprogram the default link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset wiped the GRC windows; remap the MSI-X table. */
	if (bp->flags & USING_MSIX_FLAG)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4222
/* Program the chip after a reset: DMA configuration, context memory,
 * internal CPUs, MAC address, MTU, host coalescing parameters, and the
 * receive filter, then tell the firmware initialization is complete.
 *
 * The register writes follow the hardware's required bring-up order;
 * do not reorder them.  Returns 0 on success or a negative errno from
 * context/CPU init or the final firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* Ping-pong DMA workaround for 5706 (except A0) on PCI. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the relaxed-ordering enable bit in PCI-X command. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Status and statistics block DMA addresses (split 64-bit). */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds: high 16 bits are the values used
	 * while interrupts are enabled, low 16 bits while polling. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware initialization is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4402
4403 static void
4404 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4405 {
4406         u32 val, offset0, offset1, offset2, offset3;
4407
4408         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4409                 offset0 = BNX2_L2CTX_TYPE_XI;
4410                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4411                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4412                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4413         } else {
4414                 offset0 = BNX2_L2CTX_TYPE;
4415                 offset1 = BNX2_L2CTX_CMD_TYPE;
4416                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4417                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4418         }
4419         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4420         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4421
4422         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4423         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4424
4425         val = (u64) bp->tx_desc_mapping >> 32;
4426         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4427
4428         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4429         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4430 }
4431
4432 static void
4433 bnx2_init_tx_ring(struct bnx2 *bp)
4434 {
4435         struct tx_bd *txbd;
4436         u32 cid;
4437         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4438
4439         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4440
4441         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4442
4443         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4444         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4445
4446         bp->tx_prod = 0;
4447         bnapi->tx_cons = 0;
4448         bnapi->hw_tx_cons = 0;
4449         bp->tx_prod_bseq = 0;
4450
4451         cid = TX_CID;
4452         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4453         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4454
4455         bnx2_init_tx_context(bp, cid);
4456 }
4457
4458 static void
4459 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4460                      int num_rings)
4461 {
4462         int i;
4463         struct rx_bd *rxbd;
4464
4465         for (i = 0; i < num_rings; i++) {
4466                 int j;
4467
4468                 rxbd = &rx_ring[i][0];
4469                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4470                         rxbd->rx_bd_len = buf_size;
4471                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4472                 }
4473                 if (i == (num_rings - 1))
4474                         j = 0;
4475                 else
4476                         j = i + 1;
4477                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4478                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4479         }
4480 }
4481
/* Initialize the RX rings: reset software producer/consumer state,
 * build the descriptor chains, program the RX L2 context (including
 * the optional jumbo page ring), pre-fill the rings with buffers, and
 * publish the initial producer indices to the chip's mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnapi->rx_prod = 0;
	bnapi->rx_cons = 0;
	bnapi->rx_prod_bseq = 0;
	bnapi->rx_pg_prod = 0;
	bnapi->rx_pg_cons = 0;

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	/* Page ring disabled by default; enabled below for jumbo MTUs. */
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		/* Page ring base address (64-bit, split hi/lo). */
		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	/* Normal RX ring base address (64-bit, split hi/lo). */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = bnapi->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bnapi->rx_pg_prod = prod;

	/* Pre-fill the normal ring with skbs. */
	ring_prod = prod = bnapi->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bnapi->rx_prod = prod;

	/* Publish producer indices and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
		 bnapi->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4555
4556 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4557 {
4558         u32 max, num_rings = 1;
4559
4560         while (ring_size > MAX_RX_DESC_CNT) {
4561                 ring_size -= MAX_RX_DESC_CNT;
4562                 num_rings++;
4563         }
4564         /* round to next power of 2 */
4565         max = max_size;
4566         while ((max & num_rings) == 0)
4567                 max >>= 1;
4568
4569         if (num_rings != max)
4570                 max <<= 1;
4571
4572         return max;
4573 }
4574
4575 static void
4576 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4577 {
4578         u32 rx_size, rx_space, jumbo_size;
4579
4580         /* 8 for CRC and VLAN */
4581         rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4582
4583         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4584                 sizeof(struct skb_shared_info);
4585
4586         bp->rx_copy_thresh = RX_COPY_THRESH;
4587         bp->rx_pg_ring_size = 0;
4588         bp->rx_max_pg_ring = 0;
4589         bp->rx_max_pg_ring_idx = 0;
4590         if (rx_space > PAGE_SIZE) {
4591                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4592
4593                 jumbo_size = size * pages;
4594                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4595                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4596
4597                 bp->rx_pg_ring_size = jumbo_size;
4598                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4599                                                         MAX_RX_PG_RINGS);
4600                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4601                 rx_size = RX_COPY_THRESH + bp->rx_offset;
4602                 bp->rx_copy_thresh = 0;
4603         }
4604
4605         bp->rx_buf_use_size = rx_size;
4606         /* hw alignment */
4607         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4608         bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4609         bp->rx_ring_size = size;
4610         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4611         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4612 }
4613
/* Unmap and free every skb still sitting in the TX ring.  Used on the
 * teardown/reset paths; assumes no new transmits are in flight.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* First descriptor maps the skb's linear data. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Each page fragment occupies one following descriptor. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head descriptor and all frag descriptors. */
		i += j + 1;
	}

}
4650
4651 static void
4652 bnx2_free_rx_skbs(struct bnx2 *bp)
4653 {
4654         int i;
4655
4656         if (bp->rx_buf_ring == NULL)
4657                 return;
4658
4659         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4660                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4661                 struct sk_buff *skb = rx_buf->skb;
4662
4663                 if (skb == NULL)
4664                         continue;
4665
4666                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4667                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4668
4669                 rx_buf->skb = NULL;
4670
4671                 dev_kfree_skb(skb);
4672         }
4673         for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4674                 bnx2_free_rx_page(bp, i);
4675 }
4676
/* Release all TX and RX buffers; called on the shutdown/reset paths. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4683
4684 static int
4685 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4686 {
4687         int rc;
4688
4689         rc = bnx2_reset_chip(bp, reset_code);
4690         bnx2_free_skbs(bp);
4691         if (rc)
4692                 return rc;
4693
4694         if ((rc = bnx2_init_chip(bp)) != 0)
4695                 return rc;
4696
4697         bnx2_init_tx_ring(bp);
4698         bnx2_init_rx_ring(bp);
4699         return 0;
4700 }
4701
4702 static int
4703 bnx2_init_nic(struct bnx2 *bp)
4704 {
4705         int rc;
4706
4707         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4708                 return rc;
4709
4710         spin_lock_bh(&bp->phy_lock);
4711         bnx2_init_phy(bp);
4712         bnx2_set_link(bp);
4713         spin_unlock_bh(&bp->phy_lock);
4714         return 0;
4715 }
4716
/* Self-test: verify each register in reg_tbl by checking that its
 * read/write bits (rw_mask) can be cleared and set, and that its
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * value is restored in all cases.  Entries flagged BNX2_FL_NOT_5709
 * are skipped on 5709 chips.  Returns 0 on success, -ENODEV on the
 * first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table fields: register offset, chip-applicability flags,
	 * writable-bit mask, read-only-bit mask.  Terminated by the
	 * 0xffff offset sentinel. */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read back 0, ro bits
		 * must keep their saved value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits
		 * must still keep their saved value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4887
4888 static int
4889 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4890 {
4891         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4892                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4893         int i;
4894
4895         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4896                 u32 offset;
4897
4898                 for (offset = 0; offset < size; offset += 4) {
4899
4900                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4901
4902                         if (REG_RD_IND(bp, start + offset) !=
4903                                 test_pattern[i]) {
4904                                 return -ENODEV;
4905                         }
4906                 }
4907         }
4908         return 0;
4909 }
4910
4911 static int
4912 bnx2_test_memory(struct bnx2 *bp)
4913 {
4914         int ret = 0;
4915         int i;
4916         static struct mem_entry {
4917                 u32   offset;
4918                 u32   len;
4919         } mem_tbl_5706[] = {
4920                 { 0x60000,  0x4000 },
4921                 { 0xa0000,  0x3000 },
4922                 { 0xe0000,  0x4000 },
4923                 { 0x120000, 0x4000 },
4924                 { 0x1a0000, 0x4000 },
4925                 { 0x160000, 0x4000 },
4926                 { 0xffffffff, 0    },
4927         },
4928         mem_tbl_5709[] = {
4929                 { 0x60000,  0x4000 },
4930                 { 0xa0000,  0x3000 },
4931                 { 0xe0000,  0x4000 },
4932                 { 0x120000, 0x4000 },
4933                 { 0x1a0000, 0x4000 },
4934                 { 0xffffffff, 0    },
4935         };
4936         struct mem_entry *mem_tbl;
4937
4938         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4939                 mem_tbl = mem_tbl_5709;
4940         else
4941                 mem_tbl = mem_tbl_5706;
4942
4943         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4944                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4945                         mem_tbl[i].len)) != 0) {
4946                         return ret;
4947                 }
4948         }
4949
4950         return ret;
4951 }
4952
4953 #define BNX2_MAC_LOOPBACK       0
4954 #define BNX2_PHY_LOOPBACK       1
4955
/* Self-test: transmit one frame in MAC or PHY loopback mode and verify
 * it is received back intact.  Returns 0 on success, -EINVAL for an
 * unknown mode, -ENOMEM if no skb could be allocated, -ENODEV if the
 * frame was not looped back correctly.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* A remote (firmware-managed) PHY cannot be put in
		 * loopback; report success rather than failure.
		 */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: destination MAC, 8 zero bytes, then a
	 * counting byte pattern that is verified on receive.  Sized to
	 * stay below the jumbo threshold so it fits one rx buffer.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force an immediate status block update (without interrupt) so
	 * we sample the current rx consumer index before transmitting.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx descriptor covering the whole test frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then force another
	 * status block update to pick up the new ring indices.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted... */
	if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts new rx completions must be present. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip writes an l2_fhdr ahead of the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error flag fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match; the reported length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting byte pattern in the payload. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5077
5078 #define BNX2_MAC_LOOPBACK_FAILED        1
5079 #define BNX2_PHY_LOOPBACK_FAILED        2
5080 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5081                                          BNX2_PHY_LOOPBACK_FAILED)
5082
5083 static int
5084 bnx2_test_loopback(struct bnx2 *bp)
5085 {
5086         int rc = 0;
5087
5088         if (!netif_running(bp->dev))
5089                 return BNX2_LOOPBACK_FAILED;
5090
5091         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5092         spin_lock_bh(&bp->phy_lock);
5093         bnx2_init_phy(bp);
5094         spin_unlock_bh(&bp->phy_lock);
5095         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5096                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5097         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5098                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5099         return rc;
5100 }
5101
5102 #define NVRAM_SIZE 0x200
5103 #define CRC32_RESIDUAL 0xdebb20e3
5104
5105 static int
5106 bnx2_test_nvram(struct bnx2 *bp)
5107 {
5108         u32 buf[NVRAM_SIZE / 4];
5109         u8 *data = (u8 *) buf;
5110         int rc = 0;
5111         u32 magic, csum;
5112
5113         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5114                 goto test_nvram_done;
5115
5116         magic = be32_to_cpu(buf[0]);
5117         if (magic != 0x669955aa) {
5118                 rc = -ENODEV;
5119                 goto test_nvram_done;
5120         }
5121
5122         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5123                 goto test_nvram_done;
5124
5125         csum = ether_crc_le(0x100, data);
5126         if (csum != CRC32_RESIDUAL) {
5127                 rc = -ENODEV;
5128                 goto test_nvram_done;
5129         }
5130
5131         csum = ether_crc_le(0x100, data + 0x100);
5132         if (csum != CRC32_RESIDUAL) {
5133                 rc = -ENODEV;
5134         }
5135
5136 test_nvram_done:
5137         return rc;
5138 }
5139
5140 static int
5141 bnx2_test_link(struct bnx2 *bp)
5142 {
5143         u32 bmsr;
5144
5145         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5146                 if (bp->link_up)
5147                         return 0;
5148                 return -ENODEV;
5149         }
5150         spin_lock_bh(&bp->phy_lock);
5151         bnx2_enable_bmsr1(bp);
5152         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5153         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5154         bnx2_disable_bmsr1(bp);
5155         spin_unlock_bh(&bp->phy_lock);
5156
5157         if (bmsr & BMSR_LSTATUS) {
5158                 return 0;
5159         }
5160         return -ENODEV;
5161 }
5162
5163 static int
5164 bnx2_test_intr(struct bnx2 *bp)
5165 {
5166         int i;
5167         u16 status_idx;
5168
5169         if (!netif_running(bp->dev))
5170                 return -ENODEV;
5171
5172         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5173
5174         /* This register is not touched during run-time. */
5175         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5176         REG_RD(bp, BNX2_HC_COMMAND);
5177
5178         for (i = 0; i < 10; i++) {
5179                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5180                         status_idx) {
5181
5182                         break;
5183                 }
5184
5185                 msleep_interruptible(10);
5186         }
5187         if (i < 10)
5188                 return 0;
5189
5190         return -ENODEV;
5191 }
5192
/* Periodic link maintenance for the 5706 SerDes PHY.
 *
 * Implements parallel detection: if autonegotiation is not bringing
 * the link up but a signal is detected with no autoneg config words,
 * force 1000Mb full duplex.  If the link later came up via parallel
 * detect and the partner starts sending config words, re-enable
 * autonegotiation.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previously started autoneg attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Read PHY expansion registers 0x1c/0x17+0x15.
			 * NOTE(review): these appear to be vendor shadow
			 * registers for signal-detect and rx config
			 * status; the second 0x15 read presumably clears
			 * latched state -- confirm against the PHY spec.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
				!(phy2 & 0x20)) {       /* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1000Mb full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up via parallel detect; if the partner now
		 * sends config words, switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5247
/* Periodic link maintenance for the 5708 SerDes PHY: on 2.5G-capable
 * parts where autonegotiation fails to bring the link up, alternate
 * between forced 2.5G mode and autonegotiation until link is achieved.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Link is managed by the firmware/remote PHY; nothing to do. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give a pending autoneg attempt more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg is not linking; try forced 2.5G next. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either; return to
			 * autoneg and wait two timer ticks before
			 * re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5280
/* Driver heartbeat timer (re-armed every bp->current_interval): sends
 * the keep-alive to the firmware, refreshes the firmware rx drop
 * counter, applies a 5708 statistics workaround, and runs the SerDes
 * link state machines.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. around a reset); skip the work
	 * this tick but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5311
5312 static int
5313 bnx2_request_irq(struct bnx2 *bp)
5314 {
5315         struct net_device *dev = bp->dev;
5316         unsigned long flags;
5317         struct bnx2_irq *irq;
5318         int rc = 0, i;
5319
5320         if (bp->flags & USING_MSI_OR_MSIX_FLAG)
5321                 flags = 0;
5322         else
5323                 flags = IRQF_SHARED;
5324
5325         for (i = 0; i < bp->irq_nvecs; i++) {
5326                 irq = &bp->irq_tbl[i];
5327                 rc = request_irq(irq->vector, irq->handler, flags, dev->name,
5328                                  dev);
5329                 if (rc)
5330                         break;
5331                 irq->requested = 1;
5332         }
5333         return rc;
5334 }
5335
5336 static void
5337 bnx2_free_irq(struct bnx2 *bp)
5338 {
5339         struct net_device *dev = bp->dev;
5340         struct bnx2_irq *irq;
5341         int i;
5342
5343         for (i = 0; i < bp->irq_nvecs; i++) {
5344                 irq = &bp->irq_tbl[i];
5345                 if (irq->requested)
5346                         free_irq(irq->vector, dev);
5347                 irq->requested = 0;
5348         }
5349         if (bp->flags & USING_MSI_FLAG)
5350                 pci_disable_msi(bp->pdev);
5351         else if (bp->flags & USING_MSIX_FLAG)
5352                 pci_disable_msix(bp->pdev);
5353
5354         bp->flags &= ~(USING_MSI_OR_MSIX_FLAG | ONE_SHOT_MSI_FLAG);
5355 }
5356
/* Program the chip-side MSI-X support: initialize the vector table and
 * point the table and PBA offset/BIR registers at the GRC windows that
 * map them.
 */
static void
bnx2_enable_msix(struct bnx2 *bp)
{
	bnx2_setup_msix_tbl(bp);
	/* NOTE(review): looks like the N-1 "table size" encoding used by
	 * PCI MSI-X -- confirm against the 5709 programming guide.
	 */
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
}
5365
/* Choose the interrupt mode and fill in bp->irq_tbl.  Starts from the
 * legacy INTx defaults (which also serve as the fallback), upgrades to
 * MSI-X when the device supports it, otherwise to MSI (one-shot MSI on
 * the 5709).  @dis_msi forces plain INTx.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & MSIX_CAP_FLAG) && !dis_msi)
		bnx2_enable_msix(bp);

	/* Fall back to MSI only if MSI-X was not enabled above. */
	if ((bp->flags & MSI_CAP_FLAG) && !dis_msi &&
	    !(bp->flags & USING_MSIX_FLAG)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 uses the one-shot MSI handler. */
				bp->flags |= ONE_SHOT_MSI_FLAG;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}
}
5391
5392 /* Called with rtnl_lock */
5393 static int
5394 bnx2_open(struct net_device *dev)
5395 {
5396         struct bnx2 *bp = netdev_priv(dev);
5397         int rc;
5398
5399         netif_carrier_off(dev);
5400
5401         bnx2_set_power_state(bp, PCI_D0);
5402         bnx2_disable_int(bp);
5403
5404         rc = bnx2_alloc_mem(bp);
5405         if (rc)
5406                 return rc;
5407
5408         bnx2_setup_int_mode(bp, disable_msi);
5409         bnx2_napi_enable(bp);
5410         rc = bnx2_request_irq(bp);
5411
5412         if (rc) {
5413                 bnx2_napi_disable(bp);
5414                 bnx2_free_mem(bp);
5415                 return rc;
5416         }
5417
5418         rc = bnx2_init_nic(bp);
5419
5420         if (rc) {
5421                 bnx2_napi_disable(bp);
5422                 bnx2_free_irq(bp);
5423                 bnx2_free_skbs(bp);
5424                 bnx2_free_mem(bp);
5425                 return rc;
5426         }
5427
5428         mod_timer(&bp->timer, jiffies + bp->current_interval);
5429
5430         atomic_set(&bp->intr_sem, 0);
5431
5432         bnx2_enable_int(bp);
5433
5434         if (bp->flags & USING_MSI_FLAG) {
5435                 /* Test MSI to make sure it is working
5436                  * If MSI test fails, go back to INTx mode
5437                  */
5438                 if (bnx2_test_intr(bp) != 0) {
5439                         printk(KERN_WARNING PFX "%s: No interrupt was generated"
5440                                " using MSI, switching to INTx mode. Please"
5441                                " report this failure to the PCI maintainer"
5442                                " and include system chipset information.\n",
5443                                bp->dev->name);
5444
5445                         bnx2_disable_int(bp);
5446                         bnx2_free_irq(bp);
5447
5448                         bnx2_setup_int_mode(bp, 1);
5449
5450                         rc = bnx2_init_nic(bp);
5451
5452                         if (!rc)
5453                                 rc = bnx2_request_irq(bp);
5454
5455                         if (rc) {
5456                                 bnx2_napi_disable(bp);
5457                                 bnx2_free_skbs(bp);
5458                                 bnx2_free_mem(bp);
5459                                 del_timer_sync(&bp->timer);
5460                                 return rc;
5461                         }
5462                         bnx2_enable_int(bp);
5463                 }
5464         }
5465         if (bp->flags & USING_MSI_FLAG) {
5466                 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5467         }
5468
5469         netif_start_queue(dev);
5470
5471         return 0;
5472 }
5473
/* Workqueue handler that resets the chip after a tx timeout.  The
 * in_reset_task flag lets bnx2_close() wait for us instead of calling
 * flush_scheduled_work(), which could deadlock under rtnl_lock.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	/* NOTE(review): bnx2_init_nic()'s return value is ignored; a
	 * failed re-init would leave the device non-functional.
	 */
	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5491
/* net_device watchdog callback: defer the chip reset to process
 * context via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5500
5501 #ifdef BCM_VLAN
5502 /* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	/* VLAN acceleration callback: record the new vlan group and
	 * reprogram the rx mode with traffic paused.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5515 #endif
5516
5517 /* Called with netif_tx_lock.
5518  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5519  * netif_wake_queue().
5520  */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Queue one skb on the tx ring: one BD for the linear head plus
	 * one BD per page fragment, with checksum-offload, VLAN and TSO
	 * information encoded in the BD flags.  Returns NETDEV_TX_OK,
	 * or NETDEV_TX_BUSY if the ring unexpectedly has no room.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		/* The queue should have been stopped before the ring
		 * filled up; reaching this point is a driver bug.
		 */
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO frame: encode MSS, TCP option length and (for IPv6)
		 * the transport header offset into the BD fields.
		 */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Extension headers present: the offset
				 * (in 8-byte units) is split across the
				 * BD flag bits and the MSS field.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prepare the IPv4/TCP headers for LSO: zero the
			 * IP checksum, set per-segment tot_len, and seed
			 * the TCP checksum with the pseudo-header sum.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD: the linear part of the skb, START flag set. */
	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* END flag goes on the last BD written (the first one if there
	 * were no fragments).
	 */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when too few BDs remain for a maximally
	 * fragmented skb; re-wake immediately if completions already
	 * freed enough room (closes a race with bnx2_tx_int()).
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5657
5658 /* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	/* net_device close: stop traffic, quiesce the chip with a
	 * reset code reflecting the wake-on-LAN setting, release IRQs
	 * and ring memory, then drop to D3hot.  Always returns 0.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how we are going down so it can keep the
	 * link up only when wake-on-LAN is in use.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5690
/* Read a statistics-block counter as an unsigned long: the 64-bit
 * variant combines the _hi/_lo register pair, the 32-bit variant uses
 * only _lo.  The expansions are fully parenthesized so the macros are
 * safe inside larger expressions (e.g. multiplied or compared).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5703
/* net_device get_stats: translate the DMA'd hardware statistics block
 * into the cached struct net_device_stats.  If the statistics block
 * has not been allocated yet, the (zeroed/stale) cached counters are
 * returned unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* The carrier-sense error counter is not usable on 5706 and
	 * 5708 A0 chips; report 0 there.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include drops counted by the firmware as missed packets. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5779
5780 /* All ethtool functions called with rtnl_lock */
5781
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* ethtool get_settings: report supported/advertised modes based
	 * on the PHY port type, and the current speed/duplex (or -1 for
	 * both when there is no carrier).  Always returns 0.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote PHY may be switched between fibre and copper, so
	 * advertise support for both.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock keeps the reported port/speed/duplex consistent with
	 * concurrent link changes.
	 */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* No link: speed and duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
5840
/* ETHTOOL_SSET handler: validate and apply new link settings.
 * Works on local copies of autoneg/speed/duplex/advertising so nothing
 * in *bp is modified until every check has passed; on any invalid
 * combination we bail out with -EINVAL via err_out_unlock.
 * Returns 0 or the bnx2_setup_phy() result on success, -EINVAL on a
 * rejected configuration.  Called with rtnl_lock held.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Changing the port type is only possible with a remote PHY. */
        if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G needs a capable PHY and a fibre port. */
                        if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        /* 1000 half duplex is not supported. */
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything the port
                         * type supports. */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre only supports 1000/2500 full duplex. */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        /* Gigabit speeds cannot be forced on copper. */
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed: commit the new configuration. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
5923
5924 static void
5925 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5926 {
5927         struct bnx2 *bp = netdev_priv(dev);
5928
5929         strcpy(info->driver, DRV_MODULE_NAME);
5930         strcpy(info->version, DRV_MODULE_VERSION);
5931         strcpy(info->bus_info, pci_name(bp->pdev));
5932         strcpy(info->fw_version, bp->fw_version);
5933 }
5934
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ETHTOOL_GREGS: tell the core how big a buffer to allocate. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
5942
5943 static void
5944 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5945 {
5946         u32 *p = _p, i, offset;
5947         u8 *orig_p = _p;
5948         struct bnx2 *bp = netdev_priv(dev);
5949         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5950                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5951                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5952                                  0x1040, 0x1048, 0x1080, 0x10a4,
5953                                  0x1400, 0x1490, 0x1498, 0x14f0,
5954                                  0x1500, 0x155c, 0x1580, 0x15dc,
5955                                  0x1600, 0x1658, 0x1680, 0x16d8,
5956                                  0x1800, 0x1820, 0x1840, 0x1854,
5957                                  0x1880, 0x1894, 0x1900, 0x1984,
5958                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5959                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5960                                  0x2000, 0x2030, 0x23c0, 0x2400,
5961                                  0x2800, 0x2820, 0x2830, 0x2850,
5962                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5963                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5964                                  0x4080, 0x4090, 0x43c0, 0x4458,
5965                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5966                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5967                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5968                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5969                                  0x6800, 0x6848, 0x684c, 0x6860,
5970                                  0x6888, 0x6910, 0x8000 };
5971
5972         regs->version = 0;
5973
5974         memset(p, 0, BNX2_REGDUMP_LEN);
5975
5976         if (!netif_running(bp->dev))
5977                 return;
5978
5979         i = 0;
5980         offset = reg_boundaries[0];
5981         p += offset;
5982         while (offset < BNX2_REGDUMP_LEN) {
5983                 *p++ = REG_RD(bp, offset);
5984                 offset += 4;
5985                 if (offset == reg_boundaries[i + 1]) {
5986                         offset = reg_boundaries[i + 2];
5987                         p = (u32 *) (orig_p + offset);
5988                         i += 2;
5989                 }
5990         }
5991 }
5992
5993 static void
5994 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5995 {
5996         struct bnx2 *bp = netdev_priv(dev);
5997
5998         if (bp->flags & NO_WOL_FLAG) {
5999                 wol->supported = 0;
6000                 wol->wolopts = 0;
6001         }
6002         else {
6003                 wol->supported = WAKE_MAGIC;
6004                 if (bp->wol)
6005                         wol->wolopts = WAKE_MAGIC;
6006                 else
6007                         wol->wolopts = 0;
6008         }
6009         memset(&wol->sopass, 0, sizeof(wol->sopass));
6010 }
6011
6012 static int
6013 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6014 {
6015         struct bnx2 *bp = netdev_priv(dev);
6016
6017         if (wol->wolopts & ~WAKE_MAGIC)
6018                 return -EINVAL;
6019
6020         if (wol->wolopts & WAKE_MAGIC) {
6021                 if (bp->flags & NO_WOL_FLAG)
6022                         return -EINVAL;
6023
6024                 bp->wol = 1;
6025         }
6026         else {
6027                 bp->wol = 0;
6028         }
6029         return 0;
6030 }
6031
/* ETHTOOL_NWAY_RST handler: restart autonegotiation.
 * Fails with -EINVAL if autoneg is not currently enabled.  For remote
 * PHYs the whole job is delegated to bnx2_setup_remote_phy().  For
 * serdes PHYs the link is first forced down (loopback) so the partner
 * notices the renegotiation; note phy_lock is dropped across the
 * msleep() and reacquired afterwards.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                spin_unlock_bh(&bp->phy_lock);

                /* Let the link partner see the link drop. */
                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout handled by bp->timer. */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6074
6075 static int
6076 bnx2_get_eeprom_len(struct net_device *dev)
6077 {
6078         struct bnx2 *bp = netdev_priv(dev);
6079
6080         if (bp->flash_info == NULL)
6081                 return 0;
6082
6083         return (int) bp->flash_size;
6084 }
6085
6086 static int
6087 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6088                 u8 *eebuf)
6089 {
6090         struct bnx2 *bp = netdev_priv(dev);
6091         int rc;
6092
6093         /* parameters already validated in ethtool_get_eeprom */
6094
6095         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6096
6097         return rc;
6098 }
6099
6100 static int
6101 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6102                 u8 *eebuf)
6103 {
6104         struct bnx2 *bp = netdev_priv(dev);
6105         int rc;
6106
6107         /* parameters already validated in ethtool_set_eeprom */
6108
6109         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6110
6111         return rc;
6112 }
6113
6114 static int
6115 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6116 {
6117         struct bnx2 *bp = netdev_priv(dev);
6118
6119         memset(coal, 0, sizeof(struct ethtool_coalesce));
6120
6121         coal->rx_coalesce_usecs = bp->rx_ticks;
6122         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6123         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6124         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6125
6126         coal->tx_coalesce_usecs = bp->tx_ticks;
6127         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6128         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6129         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6130
6131         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6132
6133         return 0;
6134 }
6135
/* ETHTOOL_SCOALESCE handler: apply new interrupt coalescing settings.
 * Each value is silently clamped to the hardware limits: tick values
 * to 10 bits (0x3ff), frame-count trip values to 8 bits (0xff).  If
 * the device is running it is restarted so the host coalescing block
 * picks up the new values.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
        if (bp->rx_quick_cons_trip_int > 0xff)
                bp->rx_quick_cons_trip_int = 0xff;

        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
                0xff;

        bp->stats_ticks = coal->stats_block_coalesce_usecs;
        /* 5708 only supports stats updates off or once per second
         * (hardware restriction). */
        if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
                        bp->stats_ticks = USEC_PER_SEC;
        }
        /* Clamp, then mask to the bits the HC_STATS_TICKS field holds. */
        if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
                bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

        /* Restart the NIC so the new coalescing values take effect. */
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }

        return 0;
}
6184
6185 static void
6186 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6187 {
6188         struct bnx2 *bp = netdev_priv(dev);
6189
6190         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6191         ering->rx_mini_max_pending = 0;
6192         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6193
6194         ering->rx_pending = bp->rx_ring_size;
6195         ering->rx_mini_pending = 0;
6196         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6197
6198         ering->tx_max_pending = MAX_TX_DESC_CNT;
6199         ering->tx_pending = bp->tx_ring_size;
6200 }
6201
/* Resize the rx and tx rings.  If the device is up it is torn down
 * (chip reset, skbs and DMA memory freed), the new sizes are recorded,
 * and everything is reallocated and restarted.
 * NOTE(review): if bnx2_alloc_mem() fails here the function returns
 * with the interface stopped and its memory freed — the device is
 * unusable until brought down and up again; verify callers handle a
 * nonzero return accordingly.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        /* Record the new sizes (rx also derives the page-ring size). */
        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (rc)
                        return rc;
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }
        return 0;
}
6226
6227 static int
6228 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6229 {
6230         struct bnx2 *bp = netdev_priv(dev);
6231         int rc;
6232
6233         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6234                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6235                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6236
6237                 return -EINVAL;
6238         }
6239         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6240         return rc;
6241 }
6242
6243 static void
6244 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6245 {
6246         struct bnx2 *bp = netdev_priv(dev);
6247
6248         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6249         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6250         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6251 }
6252
6253 static int
6254 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6255 {
6256         struct bnx2 *bp = netdev_priv(dev);
6257
6258         bp->req_flow_ctrl = 0;
6259         if (epause->rx_pause)
6260                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6261         if (epause->tx_pause)
6262                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6263
6264         if (epause->autoneg) {
6265                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6266         }
6267         else {
6268                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6269         }
6270
6271         spin_lock_bh(&bp->phy_lock);
6272
6273         bnx2_setup_phy(bp, bp->phy_port);
6274
6275         spin_unlock_bh(&bp->phy_lock);
6276
6277         return 0;
6278 }
6279
6280 static u32
6281 bnx2_get_rx_csum(struct net_device *dev)
6282 {
6283         struct bnx2 *bp = netdev_priv(dev);
6284
6285         return bp->rx_csum;
6286 }
6287
6288 static int
6289 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6290 {
6291         struct bnx2 *bp = netdev_priv(dev);
6292
6293         bp->rx_csum = data;
6294         return 0;
6295 }
6296
6297 static int
6298 bnx2_set_tso(struct net_device *dev, u32 data)
6299 {
6300         struct bnx2 *bp = netdev_priv(dev);
6301
6302         if (data) {
6303                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6304                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6305                         dev->features |= NETIF_F_TSO6;
6306         } else
6307                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6308                                    NETIF_F_TSO_ECN);
6309         return 0;
6310 }
6311
#define BNX2_NUM_STATS 46

/* ethtool statistics names (ETH_SS_STATS).  Entry order must match
 * bnx2_stats_offset_arr[] and the bnx2_*_stats_len_arr[] tables below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
6364
/* 32-bit-word offset of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Per-statistic offsets into the hardware statistics block, in the
 * same order as bnx2_stats_str_arr[].  64-bit counters point at their
 * high word; the low word immediately follows (see
 * bnx2_get_ethtool_stats()).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6415
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Counter width in bytes (8 / 4 / 0 = skip) per statistic on
 * 5706 and 5708-A0 chips; indexed like bnx2_stats_str_arr[]. */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6426
/* Counter width in bytes (8 / 4 / 0 = skip) per statistic on later
 * chips; only stat_IfHCInBadOctets (index 1) is skipped here. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6434
#define BNX2_NUM_TESTS 6

/* ethtool self-test names (ETH_SS_TEST); order matches the buf[]
 * indices filled in by bnx2_self_test(). */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6447
6448 static int
6449 bnx2_get_sset_count(struct net_device *dev, int sset)
6450 {
6451         switch (sset) {
6452         case ETH_SS_TEST:
6453                 return BNX2_NUM_TESTS;
6454         case ETH_SS_STATS:
6455                 return BNX2_NUM_STATS;
6456         default:
6457                 return -EOPNOTSUPP;
6458         }
6459 }
6460
/* ETHTOOL_TEST handler: run the self-tests.  Offline tests reset the
 * chip into diagnostic mode and therefore take the interface down for
 * their duration; online tests run against the live device.  A nonzero
 * buf[i] marks test i (indexed like bnx2_tests_str_arr[]) as failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Take the device offline and put the chip in
                 * diagnostic mode for the destructive tests. */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore normal operation; if the device was not
                 * running, just reset the chip. */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp);
                        bnx2_netif_start(bp);
                }

                /* wait for link up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6516
6517 static void
6518 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6519 {
6520         switch (stringset) {
6521         case ETH_SS_STATS:
6522                 memcpy(buf, bnx2_stats_str_arr,
6523                         sizeof(bnx2_stats_str_arr));
6524                 break;
6525         case ETH_SS_TEST:
6526                 memcpy(buf, bnx2_tests_str_arr,
6527                         sizeof(bnx2_tests_str_arr));
6528                 break;
6529         }
6530 }
6531
/* ETHTOOL_GSTATS handler: copy counters out of the shared hardware
 * statistics block into buf[], widening everything to u64.  The
 * per-chip length table encodes each counter's width: 0 = counter
 * unusable on this chip (errata, see comment above the tables),
 * 4 = single 32-bit word, 8 = two words (high word first).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 *hw_stats = (u32 *) bp->stats_blk;
        u8 *stats_len_arr = NULL;

        /* No stats block allocated (device never opened): report zeros. */
        if (hw_stats == NULL) {
                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
                return;
        }

        /* Early chip revisions have extra broken counters. */
        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                stats_len_arr = bnx2_5706_stats_len_arr;
        else
                stats_len_arr = bnx2_5708_stats_len_arr;

        for (i = 0; i < BNX2_NUM_STATS; i++) {
                if (stats_len_arr[i] == 0) {
                        /* skip this counter */
                        buf[i] = 0;
                        continue;
                }
                if (stats_len_arr[i] == 4) {
                        /* 4-byte counter */
                        buf[i] = (u64)
                                *(hw_stats + bnx2_stats_offset_arr[i]);
                        continue;
                }
                /* 8-byte counter */
                buf[i] = (((u64) *(hw_stats +
                                        bnx2_stats_offset_arr[i])) << 32) +
                                *(hw_stats + bnx2_stats_offset_arr[i] + 1);
        }
}
6572
6573 static int
6574 bnx2_phys_id(struct net_device *dev, u32 data)
6575 {
6576         struct bnx2 *bp = netdev_priv(dev);
6577         int i;
6578         u32 save;
6579
6580         if (data == 0)
6581                 data = 2;
6582
6583         save = REG_RD(bp, BNX2_MISC_CFG);
6584         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6585
6586         for (i = 0; i < (data * 2); i++) {
6587                 if ((i % 2) == 0) {
6588                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6589                 }
6590                 else {
6591                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6592                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6593                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6594                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6595                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6596                                 BNX2_EMAC_LED_TRAFFIC);
6597                 }
6598                 msleep_interruptible(500);
6599                 if (signal_pending(current))
6600                         break;
6601         }
6602         REG_WR(bp, BNX2_EMAC_LED, 0);
6603         REG_WR(bp, BNX2_MISC_CFG, save);
6604         return 0;
6605 }
6606
6607 static int
6608 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6609 {
6610         struct bnx2 *bp = netdev_priv(dev);
6611
6612         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6613                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6614         else
6615                 return (ethtool_op_set_tx_csum(dev, data));
6616 }
6617
/* ethtool entry points for this driver.  The ethtool core takes
 * rtnl_lock before invoking any of these; unimplemented operations are
 * left NULL and handled generically (or rejected) by the core. */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
};
6648
/* Called with rtnl_lock */
/* Device ioctl handler for the MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).  Direct MII access is refused for remote PHYs (the
 * PHY is owned by management firmware) and while the device is down
 * (the MDIO bus is not usable).  All PHY accesses take phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                /* Writing PHY registers requires admin capability. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
6702
6703 /* Called with rtnl_lock */
6704 static int
6705 bnx2_change_mac_addr(struct net_device *dev, void *p)
6706 {
6707         struct sockaddr *addr = p;
6708         struct bnx2 *bp = netdev_priv(dev);
6709
6710         if (!is_valid_ether_addr(addr->sa_data))
6711                 return -EINVAL;
6712
6713         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6714         if (netif_running(dev))
6715                 bnx2_set_mac_addr(bp);
6716
6717         return 0;
6718 }
6719
6720 /* Called with rtnl_lock */
6721 static int
6722 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6723 {
6724         struct bnx2 *bp = netdev_priv(dev);
6725
6726         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6727                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6728                 return -EINVAL;
6729
6730         dev->mtu = new_mtu;
6731         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6732 }
6733
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: invoke the interrupt handler with the device IRQ masked. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
6745
6746 static void __devinit
6747 bnx2_get_5709_media(struct bnx2 *bp)
6748 {
6749         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6750         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6751         u32 strap;
6752
6753         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6754                 return;
6755         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6756                 bp->phy_flags |= PHY_SERDES_FLAG;
6757                 return;
6758         }
6759
6760         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6761                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6762         else
6763                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6764
6765         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6766                 switch (strap) {
6767                 case 0x4:
6768                 case 0x5:
6769                 case 0x6:
6770                         bp->phy_flags |= PHY_SERDES_FLAG;
6771                         return;
6772                 }
6773         } else {
6774                 switch (strap) {
6775                 case 0x1:
6776                 case 0x2:
6777                 case 0x4:
6778                         bp->phy_flags |= PHY_SERDES_FLAG;
6779                         return;
6780                 }
6781         }
6782 }
6783
/* Detect the bus type (PCI vs. PCI-X), width and clock speed from the
 * chip's miscellaneous status register and record the results in
 * bp->flags and bp->bus_speed_mhz (reported later by bnx2_bus_string()).
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected clock field onto a nominal MHz value. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs. 66 MHz can be detected. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6835
6836 static int __devinit
6837 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6838 {
6839         struct bnx2 *bp;
6840         unsigned long mem_len;
6841         int rc, i, j;
6842         u32 reg;
6843         u64 dma_mask, persist_dma_mask;
6844
6845         SET_NETDEV_DEV(dev, &pdev->dev);
6846         bp = netdev_priv(dev);
6847
6848         bp->flags = 0;
6849         bp->phy_flags = 0;
6850
6851         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6852         rc = pci_enable_device(pdev);
6853         if (rc) {
6854                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6855                 goto err_out;
6856         }
6857
6858         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6859                 dev_err(&pdev->dev,
6860                         "Cannot find PCI device base address, aborting.\n");
6861                 rc = -ENODEV;
6862                 goto err_out_disable;
6863         }
6864
6865         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6866         if (rc) {
6867                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6868                 goto err_out_disable;
6869         }
6870
6871         pci_set_master(pdev);
6872
6873         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6874         if (bp->pm_cap == 0) {
6875                 dev_err(&pdev->dev,
6876                         "Cannot find power management capability, aborting.\n");
6877                 rc = -EIO;
6878                 goto err_out_release;
6879         }
6880
6881         bp->dev = dev;
6882         bp->pdev = pdev;
6883
6884         spin_lock_init(&bp->phy_lock);
6885         spin_lock_init(&bp->indirect_lock);
6886         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6887
6888         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6889         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6890         dev->mem_end = dev->mem_start + mem_len;
6891         dev->irq = pdev->irq;
6892
6893         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6894
6895         if (!bp->regview) {
6896                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6897                 rc = -ENOMEM;
6898                 goto err_out_release;
6899         }
6900
6901         /* Configure byte swap and enable write to the reg_window registers.
6902          * Rely on CPU to do target byte swapping on big endian systems
6903          * The chip's target access swapping will not swap all accesses
6904          */
6905         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6906                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6907                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6908
6909         bnx2_set_power_state(bp, PCI_D0);
6910
6911         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6912
6913         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6914                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6915                         dev_err(&pdev->dev,
6916                                 "Cannot find PCIE capability, aborting.\n");
6917                         rc = -EIO;
6918                         goto err_out_unmap;
6919                 }
6920                 bp->flags |= PCIE_FLAG;
6921         } else {
6922                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6923                 if (bp->pcix_cap == 0) {
6924                         dev_err(&pdev->dev,
6925                                 "Cannot find PCIX capability, aborting.\n");
6926                         rc = -EIO;
6927                         goto err_out_unmap;
6928                 }
6929         }
6930
6931         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
6932                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
6933                         bp->flags |= MSIX_CAP_FLAG;
6934         }
6935
6936         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6937                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6938                         bp->flags |= MSI_CAP_FLAG;
6939         }
6940
6941         /* 5708 cannot support DMA addresses > 40-bit.  */
6942         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6943                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6944         else
6945                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6946
6947         /* Configure DMA attributes. */
6948         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6949                 dev->features |= NETIF_F_HIGHDMA;
6950                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6951                 if (rc) {
6952                         dev_err(&pdev->dev,
6953                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6954                         goto err_out_unmap;
6955                 }
6956         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6957                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6958                 goto err_out_unmap;
6959         }
6960
6961         if (!(bp->flags & PCIE_FLAG))
6962                 bnx2_get_pci_speed(bp);
6963
6964         /* 5706A0 may falsely detect SERR and PERR. */
6965         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6966                 reg = REG_RD(bp, PCI_COMMAND);
6967                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6968                 REG_WR(bp, PCI_COMMAND, reg);
6969         }
6970         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6971                 !(bp->flags & PCIX_FLAG)) {
6972
6973                 dev_err(&pdev->dev,
6974                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6975                 goto err_out_unmap;
6976         }
6977
6978         bnx2_init_nvram(bp);
6979
6980         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6981
6982         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6983             BNX2_SHM_HDR_SIGNATURE_SIG) {
6984                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6985
6986                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6987         } else
6988                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6989
6990         /* Get the permanent MAC address.  First we need to make sure the
6991          * firmware is actually running.
6992          */
6993         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6994
6995         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6996             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6997                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6998                 rc = -ENODEV;
6999                 goto err_out_unmap;
7000         }
7001
7002         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
7003         for (i = 0, j = 0; i < 3; i++) {
7004                 u8 num, k, skip0;
7005
7006                 num = (u8) (reg >> (24 - (i * 8)));
7007                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7008                         if (num >= k || !skip0 || k == 1) {
7009                                 bp->fw_version[j++] = (num / k) + '0';
7010                                 skip0 = 0;
7011                         }
7012                 }
7013                 if (i != 2)
7014                         bp->fw_version[j++] = '.';
7015         }
7016         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
7017         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7018                 bp->wol = 1;
7019
7020         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7021                 bp->flags |= ASF_ENABLE_FLAG;
7022
7023                 for (i = 0; i < 30; i++) {
7024                         reg = REG_RD_IND(bp, bp->shmem_base +
7025                                              BNX2_BC_STATE_CONDITION);
7026                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7027                                 break;
7028                         msleep(10);
7029                 }
7030         }
7031         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
7032         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7033         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7034             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7035                 int i;
7036                 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
7037
7038                 bp->fw_version[j++] = ' ';
7039                 for (i = 0; i < 3; i++) {
7040                         reg = REG_RD_IND(bp, addr + i * 4);
7041                         reg = swab32(reg);
7042                         memcpy(&bp->fw_version[j], &reg, 4);
7043                         j += 4;
7044                 }
7045         }
7046
7047         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
7048         bp->mac_addr[0] = (u8) (reg >> 8);
7049         bp->mac_addr[1] = (u8) reg;
7050
7051         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
7052         bp->mac_addr[2] = (u8) (reg >> 24);
7053         bp->mac_addr[3] = (u8) (reg >> 16);
7054         bp->mac_addr[4] = (u8) (reg >> 8);
7055         bp->mac_addr[5] = (u8) reg;
7056
7057         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7058
7059         bp->tx_ring_size = MAX_TX_DESC_CNT;
7060         bnx2_set_rx_ring_size(bp, 255);
7061
7062         bp->rx_csum = 1;
7063
7064         bp->tx_quick_cons_trip_int = 20;
7065         bp->tx_quick_cons_trip = 20;
7066         bp->tx_ticks_int = 80;
7067         bp->tx_ticks = 80;
7068
7069         bp->rx_quick_cons_trip_int = 6;
7070         bp->rx_quick_cons_trip = 6;
7071         bp->rx_ticks_int = 18;
7072         bp->rx_ticks = 18;
7073
7074         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7075
7076         bp->timer_interval =  HZ;
7077         bp->current_interval =  HZ;
7078
7079         bp->phy_addr = 1;
7080
7081         /* Disable WOL support if we are running on a SERDES chip. */
7082         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7083                 bnx2_get_5709_media(bp);
7084         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7085                 bp->phy_flags |= PHY_SERDES_FLAG;
7086
7087         bp->phy_port = PORT_TP;
7088         if (bp->phy_flags & PHY_SERDES_FLAG) {
7089                 bp->phy_port = PORT_FIBRE;
7090                 reg = REG_RD_IND(bp, bp->shmem_base +
7091                                      BNX2_SHARED_HW_CFG_CONFIG);
7092                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7093                         bp->flags |= NO_WOL_FLAG;
7094                         bp->wol = 0;
7095                 }
7096                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7097                         bp->phy_addr = 2;
7098                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7099                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
7100                 }
7101                 bnx2_init_remote_phy(bp);
7102
7103         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7104                    CHIP_NUM(bp) == CHIP_NUM_5708)
7105                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
7106         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7107                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7108                   CHIP_REV(bp) == CHIP_REV_Bx))
7109                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
7110
7111         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7112             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7113             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7114                 bp->flags |= NO_WOL_FLAG;
7115                 bp->wol = 0;
7116         }
7117
7118         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7119                 bp->tx_quick_cons_trip_int =
7120                         bp->tx_quick_cons_trip;
7121                 bp->tx_ticks_int = bp->tx_ticks;
7122                 bp->rx_quick_cons_trip_int =
7123                         bp->rx_quick_cons_trip;
7124                 bp->rx_ticks_int = bp->rx_ticks;
7125                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7126                 bp->com_ticks_int = bp->com_ticks;
7127                 bp->cmd_ticks_int = bp->cmd_ticks;
7128         }
7129
7130         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7131          *
7132          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7133          * with byte enables disabled on the unused 32-bit word.  This is legal
7134          * but causes problems on the AMD 8132 which will eventually stop
7135          * responding after a while.
7136          *
7137          * AMD believes this incompatibility is unique to the 5706, and
7138          * prefers to locally disable MSI rather than globally disabling it.
7139          */
7140         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7141                 struct pci_dev *amd_8132 = NULL;
7142
7143                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7144                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7145                                                   amd_8132))) {
7146
7147                         if (amd_8132->revision >= 0x10 &&
7148                             amd_8132->revision <= 0x13) {
7149                                 disable_msi = 1;
7150                                 pci_dev_put(amd_8132);
7151                                 break;
7152                         }
7153                 }
7154         }
7155
7156         bnx2_set_default_link(bp);
7157         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7158
7159         init_timer(&bp->timer);
7160         bp->timer.expires = RUN_AT(bp->timer_interval);
7161         bp->timer.data = (unsigned long) bp;
7162         bp->timer.function = bnx2_timer;
7163
7164         return 0;
7165
7166 err_out_unmap:
7167         if (bp->regview) {
7168                 iounmap(bp->regview);
7169                 bp->regview = NULL;
7170         }
7171
7172 err_out_release:
7173         pci_release_regions(pdev);
7174
7175 err_out_disable:
7176         pci_disable_device(pdev);
7177         pci_set_drvdata(pdev, NULL);
7178
7179 err_out:
7180         return rc;
7181 }
7182
7183 static char * __devinit
7184 bnx2_bus_string(struct bnx2 *bp, char *str)
7185 {
7186         char *s = str;
7187
7188         if (bp->flags & PCIE_FLAG) {
7189                 s += sprintf(s, "PCI Express");
7190         } else {
7191                 s += sprintf(s, "PCI");
7192                 if (bp->flags & PCIX_FLAG)
7193                         s += sprintf(s, "-X");
7194                 if (bp->flags & PCI_32BIT_FLAG)
7195                         s += sprintf(s, " 32-bit");
7196                 else
7197                         s += sprintf(s, " 64-bit");
7198                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7199         }
7200         return str;
7201 }
7202
7203 static int __devinit
7204 bnx2_init_napi(struct bnx2 *bp)
7205 {
7206         int i;
7207         struct bnx2_napi *bnapi;
7208
7209         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7210                 bnapi = &bp->bnx2_napi[i];
7211                 bnapi->bp = bp;
7212         }
7213         netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7214 }
7215
/* PCI probe entry point: allocate the net_device, run the one-time board
 * setup, wire up the net_device operations and feature flags, and register
 * the interface with the networking core.  Returns 0 or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only once, for the first device probed. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* bnx2_init_board() cleans up after itself on failure. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Undo bnx2_init_board()'s acquisitions by hand here. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7303
/* PCI remove: tear down in roughly the reverse order of bnx2_init_one(). */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the scheduled reset task has finished before teardown. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	/* bp is the private area of dev, so free_netdev() must come after
	 * the last use of bp (the iounmap above).
	 */
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7322
/* Legacy PM suspend: save PCI state, quiesce the interface if it is up,
 * tell the bootcode why we are resetting (link-down / WOL / no-WOL), reset
 * the chip and drop into the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop the reset task, traffic and the timer before touching hw. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the reset code the bootcode should see for this suspend. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7353
/* Legacy PM resume: restore PCI state and, if the interface was up,
 * power the chip back to D0, reinitialize it and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): the return value of bnx2_init_nic() is ignored
	 * here — confirm it cannot fail on this path.
	 */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
7370
/* PCI driver glue: probe/remove plus legacy power-management hooks. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
7379
/* Module load: register the PCI driver; probing happens per device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7384
/* Module unload: unregister the driver, removing all bound devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7389
/* Module entry/exit points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7392
7393
7394