1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #include "bnx2.h"
13 #include "bnx2_fw.h"
14
15 #define DRV_MODULE_NAME         "bnx2"
16 #define PFX DRV_MODULE_NAME     ": "
17 #define DRV_MODULE_VERSION      "1.2.21"
18 #define DRV_MODULE_RELDATE      "September 7, 2005"
19
20 #define RUN_AT(x) (jiffies + (x))
21
22 /* Time in jiffies before concluding the transmitter is hung. */
23 #define TX_TIMEOUT  (5*HZ)
24
25 static char version[] __devinitdata =
26         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
29 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706 Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION);
32
33 static int disable_msi = 0;
34
35 module_param(disable_msi, int, 0);
36 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
37
38 typedef enum {
39         BCM5706 = 0,
40         NC370T,
41         NC370I,
42         BCM5706S,
43         NC370F,
44         BCM5708,
45         BCM5708S,
46 } board_t;
47
48 /* indexed by board_t, above */
49 static struct {
50         char *name;
51 } board_info[] __devinitdata = {
52         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
53         { "HP NC370T Multifunction Gigabit Server Adapter" },
54         { "HP NC370i Multifunction Gigabit Server Adapter" },
55         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
56         { "HP NC370F Multifunction Gigabit Server Adapter" },
57         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
58         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
59         };
60
61 static struct pci_device_id bnx2_pci_tbl[] = {
62         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
63           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
64         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
65           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
66         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
67           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
68         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
69           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
70         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
71           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
72         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
73           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
74         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
75           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
76         { 0, }
77 };
78
79 static struct flash_spec flash_table[] =
80 {
81         /* Slow EEPROM */
82         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
83          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
84          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
85          "EEPROM - slow"},
86         /* Expansion entry 0001 */
87         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
88          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
89          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
90          "Entry 0001"},
91         /* Saifun SA25F010 (non-buffered flash) */
92         /* strap, cfg1, & write1 need updates */
93         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
94          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
95          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
96          "Non-buffered flash (128kB)"},
97         /* Saifun SA25F020 (non-buffered flash) */
98         /* strap, cfg1, & write1 need updates */
99         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
100          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
101          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
102          "Non-buffered flash (256kB)"},
103         /* Expansion entry 0100 */
104         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
105          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
106          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
107          "Entry 0100"},
108         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
109         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,        
110          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
111          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
112          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
113         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
114         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
115          0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
116          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
117          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
118         /* Saifun SA25F005 (non-buffered flash) */
119         /* strap, cfg1, & write1 need updates */
120         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
121          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
122          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
123          "Non-buffered flash (64kB)"},
124         /* Fast EEPROM */
125         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
126          1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128          "EEPROM - fast"},
129         /* Expansion entry 1001 */
130         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
131          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133          "Entry 1001"},
134         /* Expansion entry 1010 */
135         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
136          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138          "Entry 1010"},
139         /* ATMEL AT45DB011B (buffered flash) */
140         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
141          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
142          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
143          "Buffered flash (128kB)"},
144         /* Expansion entry 1100 */
145         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
146          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
148          "Entry 1100"},
149         /* Expansion entry 1101 */
150         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
151          0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
152          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
153          "Entry 1101"},
154         /* Atmel Expansion entry 1110 */
155         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
156          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
157          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
158          "Entry 1110 (Atmel)"},
159         /* ATMEL AT45DB021B (buffered flash) */
160         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
161          1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
162          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
163          "Buffered flash (256kB)"},
164 };
165
166 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
167
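/* Return the number of TX buffer descriptors still available, handling
 * wrap of the producer and consumer indices around the ring.
 */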
168 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
169 {
170         u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
171
172         if (diff > MAX_TX_DESC_CNT)
173                 diff = (diff & MAX_TX_DESC_CNT) - 1;
174         return (bp->tx_ring_size - diff);
175 }
176
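/* Indirect register access: the target offset is latched in
 * BNX2_PCICFG_REG_WINDOW_ADDRESS and the data is then read or written
 * through the BNX2_PCICFG_REG_WINDOW register.
 */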
177 static u32
178 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
179 {
180         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
181         return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
182 }
183
184 static void
185 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
186 {
187         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
188         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
189 }
190
191 static void
192 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
193 {
194         offset += cid_addr;
195         REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
196         REG_WR(bp, BNX2_CTX_DATA, val);
197 }
198
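/* Read a PHY register over the MDIO interface.  Hardware auto-polling,
 * if enabled, is paused for the duration of the access; the read command
 * is issued through BNX2_EMAC_MDIO_COMM and polled for completion for
 * roughly 500 usec, returning -EBUSY on timeout.
 */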
199 static int
200 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
201 {
202         u32 val1;
203         int i, ret;
204
205         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
206                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
207                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
208
209                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
210                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
211
212                 udelay(40);
213         }
214
215         val1 = (bp->phy_addr << 21) | (reg << 16) |
216                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
217                 BNX2_EMAC_MDIO_COMM_START_BUSY;
218         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
219
220         for (i = 0; i < 50; i++) {
221                 udelay(10);
222
223                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
224                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
225                         udelay(5);
226
227                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
228                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
229
230                         break;
231                 }
232         }
233
234         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
235                 *val = 0x0;
236                 ret = -EBUSY;
237         }
238         else {
239                 *val = val1;
240                 ret = 0;
241         }
242
243         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
244                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
245                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
246
247                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
248                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
249
250                 udelay(40);
251         }
252
253         return ret;
254 }
255
256 static int
257 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
258 {
259         u32 val1;
260         int i, ret;
261
262         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
263                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
264                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
265
266                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
267                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
268
269                 udelay(40);
270         }
271
272         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
273                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
274                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
275         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
276     
277         for (i = 0; i < 50; i++) {
278                 udelay(10);
279
280                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
281                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
282                         udelay(5);
283                         break;
284                 }
285         }
286
287         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
288                 ret = -EBUSY;
289         else
290                 ret = 0;
291
292         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
293                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
294                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
295
296                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
297                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
298
299                 udelay(40);
300         }
301
302         return ret;
303 }
304
305 static void
306 bnx2_disable_int(struct bnx2 *bp)
307 {
308         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
309                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
310         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
311 }
312
313 static void
314 bnx2_enable_int(struct bnx2 *bp)
315 {
316         u32 val;
317
318         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
319                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
320
321         val = REG_RD(bp, BNX2_HC_COMMAND);
322         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
323 }
324
325 static void
326 bnx2_disable_int_sync(struct bnx2 *bp)
327 {
328         atomic_inc(&bp->intr_sem);
329         bnx2_disable_int(bp);
330         synchronize_irq(bp->pdev->irq);
331 }
332
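/* Quiesce the interface: bump intr_sem and mask interrupts, wait for any
 * in-flight interrupt handler to finish, then stop NAPI polling and the
 * TX queue (trans_start is refreshed so the watchdog does not fire).
 * bnx2_netif_start() below undoes this.
 */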
333 static void
334 bnx2_netif_stop(struct bnx2 *bp)
335 {
336         bnx2_disable_int_sync(bp);
337         if (netif_running(bp->dev)) {
338                 netif_poll_disable(bp->dev);
339                 netif_tx_disable(bp->dev);
340                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
341         }
342 }
343
344 static void
345 bnx2_netif_start(struct bnx2 *bp)
346 {
347         if (atomic_dec_and_test(&bp->intr_sem)) {
348                 if (netif_running(bp->dev)) {
349                         netif_wake_queue(bp->dev);
350                         netif_poll_enable(bp->dev);
351                         bnx2_enable_int(bp);
352                 }
353         }
354 }
355
356 static void
357 bnx2_free_mem(struct bnx2 *bp)
358 {
359         if (bp->stats_blk) {
360                 pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
361                                     bp->stats_blk, bp->stats_blk_mapping);
362                 bp->stats_blk = NULL;
363         }
364         if (bp->status_blk) {
365                 pci_free_consistent(bp->pdev, sizeof(struct status_block),
366                                     bp->status_blk, bp->status_blk_mapping);
367                 bp->status_blk = NULL;
368         }
369         if (bp->tx_desc_ring) {
370                 pci_free_consistent(bp->pdev,
371                                     sizeof(struct tx_bd) * TX_DESC_CNT,
372                                     bp->tx_desc_ring, bp->tx_desc_mapping);
373                 bp->tx_desc_ring = NULL;
374         }
375         kfree(bp->tx_buf_ring);
376         bp->tx_buf_ring = NULL;
377         if (bp->rx_desc_ring) {
378                 pci_free_consistent(bp->pdev,
379                                     sizeof(struct rx_bd) * RX_DESC_CNT,
380                                     bp->rx_desc_ring, bp->rx_desc_mapping);
381                 bp->rx_desc_ring = NULL;
382         }
383         kfree(bp->rx_buf_ring);
384         bp->rx_buf_ring = NULL;
385 }
386
387 static int
388 bnx2_alloc_mem(struct bnx2 *bp)
389 {
390         bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
391                                      GFP_KERNEL);
392         if (bp->tx_buf_ring == NULL)
393                 return -ENOMEM;
394
395         memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
396         bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
397                                                 sizeof(struct tx_bd) *
398                                                 TX_DESC_CNT,
399                                                 &bp->tx_desc_mapping);
400         if (bp->tx_desc_ring == NULL)
401                 goto alloc_mem_err;
402
403         bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
404                                      GFP_KERNEL);
405         if (bp->rx_buf_ring == NULL)
406                 goto alloc_mem_err;
407
408         memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
409         bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
410                                                 sizeof(struct rx_bd) *
411                                                 RX_DESC_CNT,
412                                                 &bp->rx_desc_mapping);
413         if (bp->rx_desc_ring == NULL)
414                 goto alloc_mem_err;
415
416         bp->status_blk = pci_alloc_consistent(bp->pdev,
417                                               sizeof(struct status_block),
418                                               &bp->status_blk_mapping);
419         if (bp->status_blk == NULL)
420                 goto alloc_mem_err;
421
422         memset(bp->status_blk, 0, sizeof(struct status_block));
423
424         bp->stats_blk = pci_alloc_consistent(bp->pdev,
425                                              sizeof(struct statistics_block),
426                                              &bp->stats_blk_mapping);
427         if (bp->stats_blk == NULL)
428                 goto alloc_mem_err;
429
430         memset(bp->stats_blk, 0, sizeof(struct statistics_block));
431
432         return 0;
433
434 alloc_mem_err:
435         bnx2_free_mem(bp);
436         return -ENOMEM;
437 }
438
439 static void
440 bnx2_report_fw_link(struct bnx2 *bp)
441 {
442         u32 fw_link_status = 0;
443
444         if (bp->link_up) {
445                 u32 bmsr;
446
447                 switch (bp->line_speed) {
448                 case SPEED_10:
449                         if (bp->duplex == DUPLEX_HALF)
450                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
451                         else
452                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
453                         break;
454                 case SPEED_100:
455                         if (bp->duplex == DUPLEX_HALF)
456                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
457                         else
458                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
459                         break;
460                 case SPEED_1000:
461                         if (bp->duplex == DUPLEX_HALF)
462                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
463                         else
464                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
465                         break;
466                 case SPEED_2500:
467                         if (bp->duplex == DUPLEX_HALF)
468                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
469                         else
470                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
471                         break;
472                 }
473
474                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
475
476                 if (bp->autoneg) {
477                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
478
479                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
480                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
481
482                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
483                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
484                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
485                         else
486                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
487                 }
488         }
489         else
490                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
491
492         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
493 }
494
495 static void
496 bnx2_report_link(struct bnx2 *bp)
497 {
498         if (bp->link_up) {
499                 netif_carrier_on(bp->dev);
500                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
501
502                 printk("%d Mbps ", bp->line_speed);
503
504                 if (bp->duplex == DUPLEX_FULL)
505                         printk("full duplex");
506                 else
507                         printk("half duplex");
508
509                 if (bp->flow_ctrl) {
510                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
511                                 printk(", receive ");
512                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
513                                         printk("& transmit ");
514                         }
515                         else {
516                                 printk(", transmit ");
517                         }
518                         printk("flow control ON");
519                 }
520                 printk("\n");
521         }
522         else {
523                 netif_carrier_off(bp->dev);
524                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
525         }
526
527         bnx2_report_fw_link(bp);
528 }
529
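/* Work out the TX/RX pause settings for the current link.  Pause is only
 * used on full-duplex links; without flow-control autonegotiation the
 * requested settings are applied directly.  On the 5708 SerDes the
 * resolved result is read straight from the PHY status register;
 * otherwise the local and link-partner pause advertisements are combined
 * per Table 28B-3 of 802.3.
 */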
530 static void
531 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
532 {
533         u32 local_adv, remote_adv;
534
535         bp->flow_ctrl = 0;
536         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != 
537                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
538
539                 if (bp->duplex == DUPLEX_FULL) {
540                         bp->flow_ctrl = bp->req_flow_ctrl;
541                 }
542                 return;
543         }
544
545         if (bp->duplex != DUPLEX_FULL) {
546                 return;
547         }
548
549         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
550             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
551                 u32 val;
552
553                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
554                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
555                         bp->flow_ctrl |= FLOW_CTRL_TX;
556                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
557                         bp->flow_ctrl |= FLOW_CTRL_RX;
558                 return;
559         }
560
561         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
562         bnx2_read_phy(bp, MII_LPA, &remote_adv);
563
564         if (bp->phy_flags & PHY_SERDES_FLAG) {
565                 u32 new_local_adv = 0;
566                 u32 new_remote_adv = 0;
567
568                 if (local_adv & ADVERTISE_1000XPAUSE)
569                         new_local_adv |= ADVERTISE_PAUSE_CAP;
570                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
571                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
572                 if (remote_adv & ADVERTISE_1000XPAUSE)
573                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
574                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
575                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
576
577                 local_adv = new_local_adv;
578                 remote_adv = new_remote_adv;
579         }
580
581         /* See Table 28B-3 of 802.3ab-1999 spec. */
582         if (local_adv & ADVERTISE_PAUSE_CAP) {
583                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
584                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
585                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
586                         }
587                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
588                                 bp->flow_ctrl = FLOW_CTRL_RX;
589                         }
590                 }
591                 else {
592                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
593                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
594                         }
595                 }
596         }
597         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
598                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
599                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
600
601                         bp->flow_ctrl = FLOW_CTRL_TX;
602                 }
603         }
604 }
605
606 static int
607 bnx2_5708s_linkup(struct bnx2 *bp)
608 {
609         u32 val;
610
611         bp->link_up = 1;
612         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
613         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
614                 case BCM5708S_1000X_STAT1_SPEED_10:
615                         bp->line_speed = SPEED_10;
616                         break;
617                 case BCM5708S_1000X_STAT1_SPEED_100:
618                         bp->line_speed = SPEED_100;
619                         break;
620                 case BCM5708S_1000X_STAT1_SPEED_1G:
621                         bp->line_speed = SPEED_1000;
622                         break;
623                 case BCM5708S_1000X_STAT1_SPEED_2G5:
624                         bp->line_speed = SPEED_2500;
625                         break;
626         }
627         if (val & BCM5708S_1000X_STAT1_FD)
628                 bp->duplex = DUPLEX_FULL;
629         else
630                 bp->duplex = DUPLEX_HALF;
631
632         return 0;
633 }
634
635 static int
636 bnx2_5706s_linkup(struct bnx2 *bp)
637 {
638         u32 bmcr, local_adv, remote_adv, common;
639
640         bp->link_up = 1;
641         bp->line_speed = SPEED_1000;
642
643         bnx2_read_phy(bp, MII_BMCR, &bmcr);
644         if (bmcr & BMCR_FULLDPLX) {
645                 bp->duplex = DUPLEX_FULL;
646         }
647         else {
648                 bp->duplex = DUPLEX_HALF;
649         }
650
651         if (!(bmcr & BMCR_ANENABLE)) {
652                 return 0;
653         }
654
655         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
656         bnx2_read_phy(bp, MII_LPA, &remote_adv);
657
658         common = local_adv & remote_adv;
659         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
660
661                 if (common & ADVERTISE_1000XFULL) {
662                         bp->duplex = DUPLEX_FULL;
663                 }
664                 else {
665                         bp->duplex = DUPLEX_HALF;
666                 }
667         }
668
669         return 0;
670 }
671
672 static int
673 bnx2_copper_linkup(struct bnx2 *bp)
674 {
675         u32 bmcr;
676
677         bnx2_read_phy(bp, MII_BMCR, &bmcr);
678         if (bmcr & BMCR_ANENABLE) {
679                 u32 local_adv, remote_adv, common;
680
681                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
682                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
683
684                 common = local_adv & (remote_adv >> 2);
685                 if (common & ADVERTISE_1000FULL) {
686                         bp->line_speed = SPEED_1000;
687                         bp->duplex = DUPLEX_FULL;
688                 }
689                 else if (common & ADVERTISE_1000HALF) {
690                         bp->line_speed = SPEED_1000;
691                         bp->duplex = DUPLEX_HALF;
692                 }
693                 else {
694                         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
695                         bnx2_read_phy(bp, MII_LPA, &remote_adv);
696
697                         common = local_adv & remote_adv;
698                         if (common & ADVERTISE_100FULL) {
699                                 bp->line_speed = SPEED_100;
700                                 bp->duplex = DUPLEX_FULL;
701                         }
702                         else if (common & ADVERTISE_100HALF) {
703                                 bp->line_speed = SPEED_100;
704                                 bp->duplex = DUPLEX_HALF;
705                         }
706                         else if (common & ADVERTISE_10FULL) {
707                                 bp->line_speed = SPEED_10;
708                                 bp->duplex = DUPLEX_FULL;
709                         }
710                         else if (common & ADVERTISE_10HALF) {
711                                 bp->line_speed = SPEED_10;
712                                 bp->duplex = DUPLEX_HALF;
713                         }
714                         else {
715                                 bp->line_speed = 0;
716                                 bp->link_up = 0;
717                         }
718                 }
719         }
720         else {
721                 if (bmcr & BMCR_SPEED100) {
722                         bp->line_speed = SPEED_100;
723                 }
724                 else {
725                         bp->line_speed = SPEED_10;
726                 }
727                 if (bmcr & BMCR_FULLDPLX) {
728                         bp->duplex = DUPLEX_FULL;
729                 }
730                 else {
731                         bp->duplex = DUPLEX_HALF;
732                 }
733         }
734
735         return 0;
736 }
737
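/* Program the EMAC for the resolved link: adjust BNX2_EMAC_TX_LENGTHS for
 * half-duplex gigabit, select the port mode (MII/GMII/2.5G) and duplex,
 * enable or disable RX and TX pause, and acknowledge the link-change
 * attention.
 */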
738 static int
739 bnx2_set_mac_link(struct bnx2 *bp)
740 {
741         u32 val;
742
743         REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
744         if (bp->link_up && (bp->line_speed == SPEED_1000) &&
745                 (bp->duplex == DUPLEX_HALF)) {
746                 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
747         }
748
749         /* Configure the EMAC mode register. */
750         val = REG_RD(bp, BNX2_EMAC_MODE);
751
752         val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
753                 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
754                 BNX2_EMAC_MODE_25G);
755
756         if (bp->link_up) {
757                 switch (bp->line_speed) {
758                         case SPEED_10:
759                                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
760                                         val |= BNX2_EMAC_MODE_PORT_MII_10;
761                                         break;
762                                 }
763                                 /* fall through */
764                         case SPEED_100:
765                                 val |= BNX2_EMAC_MODE_PORT_MII;
766                                 break;
767                         case SPEED_2500:
768                                 val |= BNX2_EMAC_MODE_25G;
769                                 /* fall through */
770                         case SPEED_1000:
771                                 val |= BNX2_EMAC_MODE_PORT_GMII;
772                                 break;
773                 }
774         }
775         else {
776                 val |= BNX2_EMAC_MODE_PORT_GMII;
777         }
778
779         /* Set the MAC to operate in the appropriate duplex mode. */
780         if (bp->duplex == DUPLEX_HALF)
781                 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
782         REG_WR(bp, BNX2_EMAC_MODE, val);
783
784         /* Enable/disable rx PAUSE. */
785         bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
786
787         if (bp->flow_ctrl & FLOW_CTRL_RX)
788                 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
789         REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
790
791         /* Enable/disable tx PAUSE. */
792         val = REG_RD(bp, BNX2_EMAC_TX_MODE);
793         val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
794
795         if (bp->flow_ctrl & FLOW_CTRL_TX)
796                 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
797         REG_WR(bp, BNX2_EMAC_TX_MODE, val);
798
799         /* Acknowledge the interrupt. */
800         REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
801
802         return 0;
803 }
804
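/* Re-evaluate the link after a link attention or timer poll: read the
 * latched BMSR twice to get current status, derive speed and duplex from
 * the SerDes or copper PHY, resolve flow control, and reprogram the MAC.
 * In MAC loopback the link is simply reported up.
 */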
805 static int
806 bnx2_set_link(struct bnx2 *bp)
807 {
808         u32 bmsr;
809         u8 link_up;
810
811         if (bp->loopback == MAC_LOOPBACK) {
812                 bp->link_up = 1;
813                 return 0;
814         }
815
816         link_up = bp->link_up;
817
818         bnx2_read_phy(bp, MII_BMSR, &bmsr);
819         bnx2_read_phy(bp, MII_BMSR, &bmsr);
820
821         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
822             (CHIP_NUM(bp) == CHIP_NUM_5706)) {
823                 u32 val;
824
825                 val = REG_RD(bp, BNX2_EMAC_STATUS);
826                 if (val & BNX2_EMAC_STATUS_LINK)
827                         bmsr |= BMSR_LSTATUS;
828                 else
829                         bmsr &= ~BMSR_LSTATUS;
830         }
831
832         if (bmsr & BMSR_LSTATUS) {
833                 bp->link_up = 1;
834
835                 if (bp->phy_flags & PHY_SERDES_FLAG) {
836                         if (CHIP_NUM(bp) == CHIP_NUM_5706)
837                                 bnx2_5706s_linkup(bp);
838                         else if (CHIP_NUM(bp) == CHIP_NUM_5708)
839                                 bnx2_5708s_linkup(bp);
840                 }
841                 else {
842                         bnx2_copper_linkup(bp);
843                 }
844                 bnx2_resolve_flow_ctrl(bp);
845         }
846         else {
847                 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
848                         (bp->autoneg & AUTONEG_SPEED)) {
849
850                         u32 bmcr;
851
852                         bnx2_read_phy(bp, MII_BMCR, &bmcr);
853                         if (!(bmcr & BMCR_ANENABLE)) {
854                                 bnx2_write_phy(bp, MII_BMCR, bmcr |
855                                         BMCR_ANENABLE);
856                         }
857                 }
858                 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
859                 bp->link_up = 0;
860         }
861
862         if (bp->link_up != link_up) {
863                 bnx2_report_link(bp);
864         }
865
866         bnx2_set_mac_link(bp);
867
868         return 0;
869 }
870
871 static int
872 bnx2_reset_phy(struct bnx2 *bp)
873 {
874         int i;
875         u32 reg;
876
877         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
878
879 #define PHY_RESET_MAX_WAIT 100
880         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
881                 udelay(10);
882
883                 bnx2_read_phy(bp, MII_BMCR, &reg);
884                 if (!(reg & BMCR_RESET)) {
885                         udelay(20);
886                         break;
887                 }
888         }
889         if (i == PHY_RESET_MAX_WAIT) {
890                 return -EBUSY;
891         }
892         return 0;
893 }
894
895 static u32
896 bnx2_phy_get_pause_adv(struct bnx2 *bp)
897 {
898         u32 adv = 0;
899
900         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
901                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
902
903                 if (bp->phy_flags & PHY_SERDES_FLAG) {
904                         adv = ADVERTISE_1000XPAUSE;
905                 }
906                 else {
907                         adv = ADVERTISE_PAUSE_CAP;
908                 }
909         }
910         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
911                 if (bp->phy_flags & PHY_SERDES_FLAG) {
912                         adv = ADVERTISE_1000XPSE_ASYM;
913                 }
914                 else {
915                         adv = ADVERTISE_PAUSE_ASYM;
916                 }
917         }
918         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
919                 if (bp->phy_flags & PHY_SERDES_FLAG) {
920                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
921                 }
922                 else {
923                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
924                 }
925         }
926         return adv;
927 }
928
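/* Configure the SerDes PHY.  With a forced speed, autonegotiation is
 * disabled and 1000X full or half duplex is forced (the 5708 also drops
 * its 2.5G advertisement); with autoneg, the 1000X and pause
 * advertisements are written and autonegotiation is restarted whenever
 * anything changed, using a shorter poll timer on the 5706.
 */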
929 static int
930 bnx2_setup_serdes_phy(struct bnx2 *bp)
931 {
932         u32 adv, bmcr, up1;
933         u32 new_adv = 0;
934
935         if (!(bp->autoneg & AUTONEG_SPEED)) {
936                 u32 new_bmcr;
937                 int force_link_down = 0;
938
939                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
940                         bnx2_read_phy(bp, BCM5708S_UP1, &up1);
941                         if (up1 & BCM5708S_UP1_2G5) {
942                                 up1 &= ~BCM5708S_UP1_2G5;
943                                 bnx2_write_phy(bp, BCM5708S_UP1, up1);
944                                 force_link_down = 1;
945                         }
946                 }
947
948                 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
949                 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
950
951                 bnx2_read_phy(bp, MII_BMCR, &bmcr);
952                 new_bmcr = bmcr & ~BMCR_ANENABLE;
953                 new_bmcr |= BMCR_SPEED1000;
954                 if (bp->req_duplex == DUPLEX_FULL) {
955                         adv |= ADVERTISE_1000XFULL;
956                         new_bmcr |= BMCR_FULLDPLX;
957                 }
958                 else {
959                         adv |= ADVERTISE_1000XHALF;
960                         new_bmcr &= ~BMCR_FULLDPLX;
961                 }
962                 if ((new_bmcr != bmcr) || (force_link_down)) {
963                         /* Force a link down that is visible to the other side */
964                         if (bp->link_up) {
965                                 bnx2_write_phy(bp, MII_ADVERTISE, adv &
966                                                ~(ADVERTISE_1000XFULL |
967                                                  ADVERTISE_1000XHALF));
968                                 bnx2_write_phy(bp, MII_BMCR, bmcr |
969                                         BMCR_ANRESTART | BMCR_ANENABLE);
970
971                                 bp->link_up = 0;
972                                 netif_carrier_off(bp->dev);
973                                 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
974                         }
975                         bnx2_write_phy(bp, MII_ADVERTISE, adv);
976                         bnx2_write_phy(bp, MII_BMCR, new_bmcr);
977                 }
978                 return 0;
979         }
980
981         if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
982                 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
983                 up1 |= BCM5708S_UP1_2G5;
984                 bnx2_write_phy(bp, BCM5708S_UP1, up1);
985         }
986
987         if (bp->advertising & ADVERTISED_1000baseT_Full)
988                 new_adv |= ADVERTISE_1000XFULL;
989
990         new_adv |= bnx2_phy_get_pause_adv(bp);
991
992         bnx2_read_phy(bp, MII_ADVERTISE, &adv);
993         bnx2_read_phy(bp, MII_BMCR, &bmcr);
994
995         bp->serdes_an_pending = 0;
996         if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
997                 /* Force a link down that is visible to the other side */
998                 if (bp->link_up) {
999                         int i;
1000
1001                         bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1002                         for (i = 0; i < 110; i++) {
1003                                 udelay(100);
1004                         }
1005                 }
1006
1007                 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1008                 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1009                         BMCR_ANENABLE);
1010                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1011                         /* Speed up link-up time when the link partner
1012                          * does not autonegotiate, which is very common
1013                          * in blade servers. Some blade servers use
1014                          * IPMI for keyboard input and it's important
1015                          * to minimize link disruptions. Autoneg. involves
1016                          * exchanging base pages plus 3 next pages and
1017                          * normally completes in about 120 msec.
1018                          */
1019                         bp->current_interval = SERDES_AN_TIMEOUT;
1020                         bp->serdes_an_pending = 1;
1021                         mod_timer(&bp->timer, jiffies + bp->current_interval);
1022                 }
1023         }
1024
1025         return 0;
1026 }
1027
1028 #define ETHTOOL_ALL_FIBRE_SPEED                                         \
1029         (ADVERTISED_1000baseT_Full)
1030
1031 #define ETHTOOL_ALL_COPPER_SPEED                                        \
1032         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
1033         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
1034         ADVERTISED_1000baseT_Full)
1035
1036 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1037         ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1038         
1039 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1040
1041 static int
1042 bnx2_setup_copper_phy(struct bnx2 *bp)
1043 {
1044         u32 bmcr;
1045         u32 new_bmcr;
1046
1047         bnx2_read_phy(bp, MII_BMCR, &bmcr);
1048
1049         if (bp->autoneg & AUTONEG_SPEED) {
1050                 u32 adv_reg, adv1000_reg;
1051                 u32 new_adv_reg = 0;
1052                 u32 new_adv1000_reg = 0;
1053
1054                 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1055                 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1056                         ADVERTISE_PAUSE_ASYM);
1057
1058                 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1059                 adv1000_reg &= PHY_ALL_1000_SPEED;
1060
1061                 if (bp->advertising & ADVERTISED_10baseT_Half)
1062                         new_adv_reg |= ADVERTISE_10HALF;
1063                 if (bp->advertising & ADVERTISED_10baseT_Full)
1064                         new_adv_reg |= ADVERTISE_10FULL;
1065                 if (bp->advertising & ADVERTISED_100baseT_Half)
1066                         new_adv_reg |= ADVERTISE_100HALF;
1067                 if (bp->advertising & ADVERTISED_100baseT_Full)
1068                         new_adv_reg |= ADVERTISE_100FULL;
1069                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1070                         new_adv1000_reg |= ADVERTISE_1000FULL;
1071                 
1072                 new_adv_reg |= ADVERTISE_CSMA;
1073
1074                 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1075
1076                 if ((adv1000_reg != new_adv1000_reg) ||
1077                         (adv_reg != new_adv_reg) ||
1078                         ((bmcr & BMCR_ANENABLE) == 0)) {
1079
1080                         bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1081                         bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1082                         bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1083                                 BMCR_ANENABLE);
1084                 }
1085                 else if (bp->link_up) {
1086                         /* Flow ctrl may have changed from auto to forced */
1087                         /* or vice-versa. */
1088
1089                         bnx2_resolve_flow_ctrl(bp);
1090                         bnx2_set_mac_link(bp);
1091                 }
1092                 return 0;
1093         }
1094
1095         new_bmcr = 0;
1096         if (bp->req_line_speed == SPEED_100) {
1097                 new_bmcr |= BMCR_SPEED100;
1098         }
1099         if (bp->req_duplex == DUPLEX_FULL) {
1100                 new_bmcr |= BMCR_FULLDPLX;
1101         }
1102         if (new_bmcr != bmcr) {
1103                 u32 bmsr;
1104                 int i = 0;
1105
1106                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1107                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1108                 
1109                 if (bmsr & BMSR_LSTATUS) {
1110                         /* Force link down */
1111                         bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1112                         do {
1113                                 udelay(100);
1114                                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1115                                 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1116                                 i++;
1117                         } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1118                 }
1119
1120                 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1121
1122                 /* Normally, the new speed is set up after the link has
1123                  * gone down and up again. In some cases, the link will not
1124                  * go down, so we need to set up the new speed here.
1125                  */
1126                 if (bmsr & BMSR_LSTATUS) {
1127                         bp->line_speed = bp->req_line_speed;
1128                         bp->duplex = bp->req_duplex;
1129                         bnx2_resolve_flow_ctrl(bp);
1130                         bnx2_set_mac_link(bp);
1131                 }
1132         }
1133         return 0;
1134 }
1135
1136 static int
1137 bnx2_setup_phy(struct bnx2 *bp)
1138 {
1139         if (bp->loopback == MAC_LOOPBACK)
1140                 return 0;
1141
1142         if (bp->phy_flags & PHY_SERDES_FLAG) {
1143                 return (bnx2_setup_serdes_phy(bp));
1144         }
1145         else {
1146                 return (bnx2_setup_copper_phy(bp));
1147         }
1148 }
1149
1150 static int
1151 bnx2_init_5708s_phy(struct bnx2 *bp)
1152 {
1153         u32 val;
1154
1155         bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1156         bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1157         bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1158
1159         bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1160         val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1161         bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1162
1163         bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1164         val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1165         bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1166
1167         if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1168                 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1169                 val |= BCM5708S_UP1_2G5;
1170                 bnx2_write_phy(bp, BCM5708S_UP1, val);
1171         }
1172
1173         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1174             (CHIP_ID(bp) == CHIP_ID_5708_B0)) {
1175                 /* increase tx signal amplitude */
1176                 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1177                                BCM5708S_BLK_ADDR_TX_MISC);
1178                 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1179                 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1180                 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1181                 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1182         }
1183
1184         val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1185               BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1186
1187         if (val) {
1188                 u32 is_backplane;
1189
1190                 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1191                                           BNX2_SHARED_HW_CFG_CONFIG);
1192                 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1193                         bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1194                                        BCM5708S_BLK_ADDR_TX_MISC);
1195                         bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1196                         bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1197                                        BCM5708S_BLK_ADDR_DIG);
1198                 }
1199         }
1200         return 0;
1201 }
1202
1203 static int
1204 bnx2_init_5706s_phy(struct bnx2 *bp)
1205 {
1206         bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1207
1208         if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1209                 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1210         }
1211
1212         if (bp->dev->mtu > 1500) {
1213                 u32 val;
1214
1215                 /* Set extended packet length bit */
1216                 bnx2_write_phy(bp, 0x18, 0x7);
1217                 bnx2_read_phy(bp, 0x18, &val);
1218                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1219
1220                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1221                 bnx2_read_phy(bp, 0x1c, &val);
1222                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1223         }
1224         else {
1225                 u32 val;
1226
1227                 bnx2_write_phy(bp, 0x18, 0x7);
1228                 bnx2_read_phy(bp, 0x18, &val);
1229                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1230
1231                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1232                 bnx2_read_phy(bp, 0x1c, &val);
1233                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1234         }
1235
1236         return 0;
1237 }
1238
1239 static int
1240 bnx2_init_copper_phy(struct bnx2 *bp)
1241 {
1242         u32 val;
1243
1244         bp->phy_flags |= PHY_CRC_FIX_FLAG;
1245
1246         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1247                 bnx2_write_phy(bp, 0x18, 0x0c00);
1248                 bnx2_write_phy(bp, 0x17, 0x000a);
1249                 bnx2_write_phy(bp, 0x15, 0x310b);
1250                 bnx2_write_phy(bp, 0x17, 0x201f);
1251                 bnx2_write_phy(bp, 0x15, 0x9506);
1252                 bnx2_write_phy(bp, 0x17, 0x401f);
1253                 bnx2_write_phy(bp, 0x15, 0x14e2);
1254                 bnx2_write_phy(bp, 0x18, 0x0400);
1255         }
1256
1257         if (bp->dev->mtu > 1500) {
1258                 /* Set extended packet length bit */
1259                 bnx2_write_phy(bp, 0x18, 0x7);
1260                 bnx2_read_phy(bp, 0x18, &val);
1261                 bnx2_write_phy(bp, 0x18, val | 0x4000);
1262
1263                 bnx2_read_phy(bp, 0x10, &val);
1264                 bnx2_write_phy(bp, 0x10, val | 0x1);
1265         }
1266         else {
1267                 bnx2_write_phy(bp, 0x18, 0x7);
1268                 bnx2_read_phy(bp, 0x18, &val);
1269                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1270
1271                 bnx2_read_phy(bp, 0x10, &val);
1272                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1273         }
1274
1275         /* ethernet@wirespeed */
1276         bnx2_write_phy(bp, 0x18, 0x7007);
1277         bnx2_read_phy(bp, 0x18, &val);
1278         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1279         return 0;
1280 }
1281
1282
1283 static int
1284 bnx2_init_phy(struct bnx2 *bp)
1285 {
1286         u32 val;
1287         int rc = 0;
1288
1289         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1290         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1291
1292         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1293
1294         bnx2_reset_phy(bp);
1295
1296         bnx2_read_phy(bp, MII_PHYSID1, &val);
1297         bp->phy_id = val << 16;
1298         bnx2_read_phy(bp, MII_PHYSID2, &val);
1299         bp->phy_id |= val & 0xffff;
1300
1301         if (bp->phy_flags & PHY_SERDES_FLAG) {
1302                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1303                         rc = bnx2_init_5706s_phy(bp);
1304                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1305                         rc = bnx2_init_5708s_phy(bp);
1306         }
1307         else {
1308                 rc = bnx2_init_copper_phy(bp);
1309         }
1310
1311         bnx2_setup_phy(bp);
1312
1313         return rc;
1314 }
1315
1316 static int
1317 bnx2_set_mac_loopback(struct bnx2 *bp)
1318 {
1319         u32 mac_mode;
1320
1321         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1322         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1323         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1324         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1325         bp->link_up = 1;
1326         return 0;
1327 }
1328
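/* Hand a message to the bootcode through the driver mailbox in shared
 * memory and poll the firmware mailbox for an acknowledge of the sequence
 * number.  On timeout the firmware is notified with the FW_TIMEOUT code
 * and -EBUSY is returned.
 */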
1329 static int
1330 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data)
1331 {
1332         int i;
1333         u32 val;
1334
1335         if (bp->fw_timed_out)
1336                 return -EBUSY;
1337
1338         bp->fw_wr_seq++;
1339         msg_data |= bp->fw_wr_seq;
1340
1341         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1342
1343         /* wait for an acknowledgement. */
1344         for (i = 0; i < (FW_ACK_TIME_OUT_MS * 1000)/5; i++) {
1345                 udelay(5);
1346
1347                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1348
1349                 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1350                         break;
1351         }
1352
1353         /* If we timed out, inform the firmware that this is the case. */
1354         if (((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) &&
1355                 ((msg_data & BNX2_DRV_MSG_DATA) != BNX2_DRV_MSG_DATA_WAIT0)) {
1356
1357                 msg_data &= ~BNX2_DRV_MSG_CODE;
1358                 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1359
1360                 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1361
1362                 bp->fw_timed_out = 1;
1363
1364                 return -EBUSY;
1365         }
1366
1367         return 0;
1368 }
1369
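/* Clear the on-chip context memory for all 96 contexts: each context page
 * is mapped through the context window, zeroed a word at a time, and its
 * virtual-to-physical CID mapping is programmed.  The 5706 A0 needs some
 * CIDs remapped to different physical pages.
 */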
1370 static void
1371 bnx2_init_context(struct bnx2 *bp)
1372 {
1373         u32 vcid;
1374
1375         vcid = 96;
1376         while (vcid) {
1377                 u32 vcid_addr, pcid_addr, offset;
1378
1379                 vcid--;
1380
1381                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1382                         u32 new_vcid;
1383
1384                         vcid_addr = GET_PCID_ADDR(vcid);
1385                         if (vcid & 0x8) {
1386                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1387                         }
1388                         else {
1389                                 new_vcid = vcid;
1390                         }
1391                         pcid_addr = GET_PCID_ADDR(new_vcid);
1392                 }
1393                 else {
1394                         vcid_addr = GET_CID_ADDR(vcid);
1395                         pcid_addr = vcid_addr;
1396                 }
1397
1398                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1399                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1400
1401                 /* Zero out the context. */
1402                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1403                         CTX_WR(bp, 0x00, offset, 0);
1404                 }
1405
1406                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1407                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1408         }
1409 }
1410
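/* Work around bad RX buffer memory on the chip: allocate every free mbuf
 * from the RX buffer pool, keep the good ones (bit 9 clear in the
 * returned value) in a temporary list, and free only those back so the
 * bad blocks are never handed out again.
 */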
1411 static int
1412 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1413 {
1414         u16 *good_mbuf;
1415         u32 good_mbuf_cnt;
1416         u32 val;
1417
1418         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1419         if (good_mbuf == NULL) {
1420                 printk(KERN_ERR PFX "Failed to allocate memory in "
1421                                     "bnx2_alloc_bad_rbuf\n");
1422                 return -ENOMEM;
1423         }
1424
1425         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1426                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1427
1428         good_mbuf_cnt = 0;
1429
1430         /* Allocate a bunch of mbufs and save the good ones in an array. */
1431         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1432         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1433                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1434
1435                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1436
1437                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1438
1439                 /* The addresses with Bit 9 set are bad memory blocks. */
1440                 if (!(val & (1 << 9))) {
1441                         good_mbuf[good_mbuf_cnt] = (u16) val;
1442                         good_mbuf_cnt++;
1443                 }
1444
1445                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1446         }
1447
1448         /* Free the good ones back to the mbuf pool thus discarding
1449          * all the bad ones. */
1450         while (good_mbuf_cnt) {
1451                 good_mbuf_cnt--;
1452
1453                 val = good_mbuf[good_mbuf_cnt];
1454                 val = (val << 9) | val | 1;
1455
1456                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1457         }
1458         kfree(good_mbuf);
1459         return 0;
1460 }
1461
1462 static void
1463 bnx2_set_mac_addr(struct bnx2 *bp) 
1464 {
1465         u32 val;
1466         u8 *mac_addr = bp->dev->dev_addr;
1467
1468         val = (mac_addr[0] << 8) | mac_addr[1];
1469
1470         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1471
1472         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
1473                 (mac_addr[4] << 8) | mac_addr[5];
1474
1475         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1476 }
1477
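/* Allocate and DMA-map an skb for one RX ring slot: the data pointer is
 * aligned to an 8-byte boundary, the mapping is recorded in both the
 * software ring entry and the hardware buffer descriptor, and
 * rx_prod_bseq is advanced by the buffer size.
 */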
1478 static inline int
1479 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1480 {
1481         struct sk_buff *skb;
1482         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1483         dma_addr_t mapping;
1484         struct rx_bd *rxbd = &bp->rx_desc_ring[index];
1485         unsigned long align;
1486
1487         skb = dev_alloc_skb(bp->rx_buf_size);
1488         if (skb == NULL) {
1489                 return -ENOMEM;
1490         }
1491
1492         if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1493                 skb_reserve(skb, 8 - align);
1494         }
1495
1496         skb->dev = bp->dev;
1497         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1498                 PCI_DMA_FROMDEVICE);
1499
1500         rx_buf->skb = skb;
1501         pci_unmap_addr_set(rx_buf, mapping, mapping);
1502
1503         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1504         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1505
1506         bp->rx_prod_bseq += bp->rx_buf_use_size;
1507
1508         return 0;
1509 }
1510
1511 static void
1512 bnx2_phy_int(struct bnx2 *bp)
1513 {
1514         u32 new_link_state, old_link_state;
1515
1516         new_link_state = bp->status_blk->status_attn_bits &
1517                 STATUS_ATTN_BITS_LINK_STATE;
1518         old_link_state = bp->status_blk->status_attn_bits_ack &
1519                 STATUS_ATTN_BITS_LINK_STATE;
1520         if (new_link_state != old_link_state) {
1521                 if (new_link_state) {
1522                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1523                                 STATUS_ATTN_BITS_LINK_STATE);
1524                 }
1525                 else {
1526                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1527                                 STATUS_ATTN_BITS_LINK_STATE);
1528                 }
1529                 bnx2_set_link(bp);
1530         }
1531 }
1532
1533 static void
1534 bnx2_tx_int(struct bnx2 *bp)
1535 {
1536         u16 hw_cons, sw_cons, sw_ring_cons;
1537         int tx_free_bd = 0;
1538
1539         hw_cons = bp->status_blk->status_tx_quick_consumer_index0;
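             /* The last descriptor of each BD page chains to the next page, so
              * step the consumer index past it when it lands on that boundary.
              */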
1540         if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1541                 hw_cons++;
1542         }
1543         sw_cons = bp->tx_cons;
1544
1545         while (sw_cons != hw_cons) {
1546                 struct sw_bd *tx_buf;
1547                 struct sk_buff *skb;
1548                 int i, last;
1549
1550                 sw_ring_cons = TX_RING_IDX(sw_cons);
1551
1552                 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1553                 skb = tx_buf->skb;
1554 #ifdef BCM_TSO 
1555                 /* partial BD completions possible with TSO packets */
1556                 if (skb_shinfo(skb)->tso_size) {
1557                         u16 last_idx, last_ring_idx;
1558
1559                         last_idx = sw_cons +
1560                                 skb_shinfo(skb)->nr_frags + 1;
1561                         last_ring_idx = sw_ring_cons +
1562                                 skb_shinfo(skb)->nr_frags + 1;
1563                         if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1564                                 last_idx++;
1565                         }
1566                         if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1567                                 break;
1568                         }
1569                 }
1570 #endif
1571                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1572                         skb_headlen(skb), PCI_DMA_TODEVICE);
1573
1574                 tx_buf->skb = NULL;
1575                 last = skb_shinfo(skb)->nr_frags;
1576
1577                 for (i = 0; i < last; i++) {
1578                         sw_cons = NEXT_TX_BD(sw_cons);
1579
1580                         pci_unmap_page(bp->pdev,
1581                                 pci_unmap_addr(
1582                                         &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1583                                         mapping),
1584                                 skb_shinfo(skb)->frags[i].size,
1585                                 PCI_DMA_TODEVICE);
1586                 }
1587
1588                 sw_cons = NEXT_TX_BD(sw_cons);
1589
1590                 tx_free_bd += last + 1;
1591
1592                 dev_kfree_skb_irq(skb);
1593
1594                 hw_cons = bp->status_blk->status_tx_quick_consumer_index0;
1595                 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1596                         hw_cons++;
1597                 }
1598         }
1599
1600         bp->tx_cons = sw_cons;
1601
1602         if (unlikely(netif_queue_stopped(bp->dev))) {
1603                 spin_lock(&bp->tx_lock);
1604                 if ((netif_queue_stopped(bp->dev)) &&
1605                     (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1606
1607                         netif_wake_queue(bp->dev);
1608                 }
1609                 spin_unlock(&bp->tx_lock);
1610         }
1611 }
1612
1613 static inline void
1614 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1615         u16 cons, u16 prod)
1616 {
1617         struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons];
1618         struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod];
1619         struct rx_bd *cons_bd = &bp->rx_desc_ring[cons];
1620         struct rx_bd *prod_bd = &bp->rx_desc_ring[prod];
1621
1622         pci_dma_sync_single_for_device(bp->pdev,
1623                 pci_unmap_addr(cons_rx_buf, mapping),
1624                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1625
1626         prod_rx_buf->skb = cons_rx_buf->skb;
1627         pci_unmap_addr_set(prod_rx_buf, mapping,
1628                         pci_unmap_addr(cons_rx_buf, mapping));
1629
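             /* The first 8 bytes of a BD hold the 64-bit buffer address; copy
              * them so the producer BD points at the recycled buffer.
              */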
1630         memcpy(prod_bd, cons_bd, 8);
1631
1632         bp->rx_prod_bseq += bp->rx_buf_use_size;
1633
1634 }
1635
1636 static int
1637 bnx2_rx_int(struct bnx2 *bp, int budget)
1638 {
1639         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1640         struct l2_fhdr *rx_hdr;
1641         int rx_pkt = 0;
1642
1643         hw_cons = bp->status_blk->status_rx_quick_consumer_index0;
1644         if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1645                 hw_cons++;
1646         }
1647         sw_cons = bp->rx_cons;
1648         sw_prod = bp->rx_prod;
1649
1650         /* Memory barrier necessary as speculative reads of the rx
1651          * buffer can be ahead of the index in the status block
1652          */
1653         rmb();
1654         while (sw_cons != hw_cons) {
1655                 unsigned int len;
1656                 u16 status;
1657                 struct sw_bd *rx_buf;
1658                 struct sk_buff *skb;
1659
1660                 sw_ring_cons = RX_RING_IDX(sw_cons);
1661                 sw_ring_prod = RX_RING_IDX(sw_prod);
1662
1663                 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1664                 skb = rx_buf->skb;
1665                 pci_dma_sync_single_for_cpu(bp->pdev,
1666                         pci_unmap_addr(rx_buf, mapping),
1667                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1668
1669                 rx_hdr = (struct l2_fhdr *) skb->data;
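                     /* The hardware length includes the 4-byte CRC; strip it. */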
1670                 len = rx_hdr->l2_fhdr_pkt_len - 4;
1671
1672                 if (rx_hdr->l2_fhdr_errors &
1673                         (L2_FHDR_ERRORS_BAD_CRC |
1674                         L2_FHDR_ERRORS_PHY_DECODE |
1675                         L2_FHDR_ERRORS_ALIGNMENT |
1676                         L2_FHDR_ERRORS_TOO_SHORT |
1677                         L2_FHDR_ERRORS_GIANT_FRAME)) {
1678
1679                         goto reuse_rx;
1680                 }
1681
1682                 /* Since we don't have a jumbo ring, copy small packets
1683                  * if mtu > 1500
1684                  */
1685                 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1686                         struct sk_buff *new_skb;
1687
1688                         new_skb = dev_alloc_skb(len + 2);
1689                         if (new_skb == NULL)
1690                                 goto reuse_rx;
1691
1692                         /* Aligned copy: start 2 bytes early so the IP header ends up 4-byte aligned. */
1693                         memcpy(new_skb->data,
1694                                 skb->data + bp->rx_offset - 2,
1695                                 len + 2);
1696
1697                         skb_reserve(new_skb, 2);
1698                         skb_put(new_skb, len);
1699                         new_skb->dev = bp->dev;
1700
1701                         bnx2_reuse_rx_skb(bp, skb,
1702                                 sw_ring_cons, sw_ring_prod);
1703
1704                         skb = new_skb;
1705                 }
1706                 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1707                         pci_unmap_single(bp->pdev,
1708                                 pci_unmap_addr(rx_buf, mapping),
1709                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1710
1711                         skb_reserve(skb, bp->rx_offset);
1712                         skb_put(skb, len);
1713                 }
1714                 else {
1715 reuse_rx:
1716                         bnx2_reuse_rx_skb(bp, skb,
1717                                 sw_ring_cons, sw_ring_prod);
1718                         goto next_rx;
1719                 }
1720
1721                 skb->protocol = eth_type_trans(skb, bp->dev);
1722
1723                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1724                 (ntohs(skb->protocol) != 0x8100)) {
1725
1726                         dev_kfree_skb_irq(skb);
1727                         goto next_rx;
1728
1729                 }
1730
1731                 status = rx_hdr->l2_fhdr_status;
1732                 skb->ip_summed = CHECKSUM_NONE;
1733                 if (bp->rx_csum &&
1734                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1735                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
1736
1737                         u16 cksum = rx_hdr->l2_fhdr_tcp_udp_xsum;
1738
1739                         if (cksum == 0xffff)
1740                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1741                 }
1742
1743 #ifdef BCM_VLAN
1744                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1745                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1746                                 rx_hdr->l2_fhdr_vlan_tag);
1747                 }
1748                 else
1749 #endif
1750                         netif_receive_skb(skb);
1751
1752                 bp->dev->last_rx = jiffies;
1753                 rx_pkt++;
1754
1755 next_rx:
1756                 rx_buf->skb = NULL;
1757
1758                 sw_cons = NEXT_RX_BD(sw_cons);
1759                 sw_prod = NEXT_RX_BD(sw_prod);
1760
1761                 if (rx_pkt == budget)
1762                         break;
1763         }
1764         bp->rx_cons = sw_cons;
1765         bp->rx_prod = sw_prod;
1766
1767         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1768
1769         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1770
1771         mmiowb();
1772
1773         return rx_pkt;
1774
1775 }
1776
1777 /* MSI ISR - The only difference between this and the INTx ISR
1778  * is that the MSI interrupt is always serviced.
1779  */
1780 static irqreturn_t
1781 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1782 {
1783         struct net_device *dev = dev_instance;
1784         struct bnx2 *bp = dev->priv;
1785
1786         prefetch(bp->status_blk);
1787         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1788                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1789                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1790
1791         /* Return here if interrupt is disabled. */
1792         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1793                 return IRQ_HANDLED;
1794
1795         netif_rx_schedule(dev);
1796
1797         return IRQ_HANDLED;
1798 }
1799
1800 static irqreturn_t
1801 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1802 {
1803         struct net_device *dev = dev_instance;
1804         struct bnx2 *bp = dev->priv;
1805
1806         /* When using INTx, it is possible for the interrupt to arrive
1807          * at the CPU before the status block write posted prior to the
1808          * interrupt. Reading a register will flush the status block.
1809          * When using MSI, the MSI message will always complete after
1810          * the status block write.
1811          */
1812         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1813             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1814              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1815                 return IRQ_NONE;
1816
1817         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1818                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1819                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1820
1821         /* Return here if interrupt is shared and is disabled. */
1822         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1823                 return IRQ_HANDLED;
1824
1825         netif_rx_schedule(dev);
1826
1827         return IRQ_HANDLED;
1828 }
1829
1830 static int
1831 bnx2_poll(struct net_device *dev, int *budget)
1832 {
1833         struct bnx2 *bp = dev->priv;
1834         int rx_done = 1;
1835
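             /* Remember the current status block index; it is written back in
              * the final interrupt ack once polling completes.
              */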
1836         bp->last_status_idx = bp->status_blk->status_idx;
1837
1838         rmb();
1839         if ((bp->status_blk->status_attn_bits &
1840                 STATUS_ATTN_BITS_LINK_STATE) !=
1841                 (bp->status_blk->status_attn_bits_ack &
1842                 STATUS_ATTN_BITS_LINK_STATE)) {
1843
1844                 spin_lock(&bp->phy_lock);
1845                 bnx2_phy_int(bp);
1846                 spin_unlock(&bp->phy_lock);
1847         }
1848
1849         if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) {
1850                 bnx2_tx_int(bp);
1851         }
1852
1853         if (bp->status_blk->status_rx_quick_consumer_index0 != bp->rx_cons) {
1854                 int orig_budget = *budget;
1855                 int work_done;
1856
1857                 if (orig_budget > dev->quota)
1858                         orig_budget = dev->quota;
1859                 
1860                 work_done = bnx2_rx_int(bp, orig_budget);
1861                 *budget -= work_done;
1862                 dev->quota -= work_done;
1863                 
1864                 if (work_done >= orig_budget) {
1865                         rx_done = 0;
1866                 }
1867         }
1868         
1869         if (rx_done) {
1870                 netif_rx_complete(dev);
1871                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1872                         BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1873                         bp->last_status_idx);
1874                 return 0;
1875         }
1876
1877         return 1;
1878 }
1879
1880 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
1881  * from set_multicast.
1882  */
1883 static void
1884 bnx2_set_rx_mode(struct net_device *dev)
1885 {
1886         struct bnx2 *bp = dev->priv;
1887         u32 rx_mode, sort_mode;
1888         int i;
1889
1890         spin_lock_bh(&bp->phy_lock);
1891
1892         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1893                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
1894         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
1895 #ifdef BCM_VLAN
1896         if (!bp->vlgrp) {
1897                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1898         }
1899 #else
1900         rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1901 #endif
1902         if (dev->flags & IFF_PROMISC) {
1903                 /* Promiscuous mode. */
1904                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
1905                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
1906         }
1907         else if (dev->flags & IFF_ALLMULTI) {
1908                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
1909                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
1910                                0xffffffff);
1911                 }
1912                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
1913         }
1914         else {
1915                 /* Accept one or more multicast(s). */
1916                 struct dev_mc_list *mclist;
1917                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
1918                 u32 regidx;
1919                 u32 bit;
1920                 u32 crc;
1921
1922                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
1923
1924                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1925                      i++, mclist = mclist->next) {
1926
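                             /* Hash on the low 8 CRC bits: bits 7:5 pick one of
                              * the eight hash registers, bits 4:0 pick the bit.
                              */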
1927                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
1928                         bit = crc & 0xff;
1929                         regidx = (bit & 0xe0) >> 5;
1930                         bit &= 0x1f;
1931                         mc_filter[regidx] |= (1 << bit);
1932                 }
1933
1934                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
1935                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
1936                                mc_filter[i]);
1937                 }
1938
1939                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
1940         }
1941
1942         if (rx_mode != bp->rx_mode) {
1943                 bp->rx_mode = rx_mode;
1944                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
1945         }
1946
1947         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
1948         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
1949         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
1950
1951         spin_unlock_bh(&bp->phy_lock);
1952 }
1953
1954 static void
1955 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
1956         u32 rv2p_proc)
1957 {
1958         int i;
1959         u32 val;
1960
1961
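             /* Each RV2P instruction is 64 bits: write the high and low words,
              * then commit them to instruction slot i / 8 of the chosen processor.
              */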
1962         for (i = 0; i < rv2p_code_len; i += 8) {
1963                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
1964                 rv2p_code++;
1965                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
1966                 rv2p_code++;
1967
1968                 if (rv2p_proc == RV2P_PROC1) {
1969                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
1970                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
1971                 }
1972                 else {
1973                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
1974                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
1975                 }
1976         }
1977
1978         /* Reset the processor, un-stall is done later. */
1979         if (rv2p_proc == RV2P_PROC1) {
1980                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
1981         }
1982         else {
1983                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
1984         }
1985 }
1986
1987 static void
1988 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
1989 {
1990         u32 offset;
1991         u32 val;
1992
1993         /* Halt the CPU. */
1994         val = REG_RD_IND(bp, cpu_reg->mode);
1995         val |= cpu_reg->mode_value_halt;
1996         REG_WR_IND(bp, cpu_reg->mode, val);
1997         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
1998
1999         /* Load the Text area. */
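             /* Convert the firmware's MIPS-view link address into the host-visible
              * scratchpad offset used by REG_WR_IND (the same translation is used
              * for each section below).
              */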
2000         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2001         if (fw->text) {
2002                 int j;
2003
2004                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2005                         REG_WR_IND(bp, offset, fw->text[j]);
2006                 }
2007         }
2008
2009         /* Load the Data area. */
2010         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2011         if (fw->data) {
2012                 int j;
2013
2014                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2015                         REG_WR_IND(bp, offset, fw->data[j]);
2016                 }
2017         }
2018
2019         /* Load the SBSS area. */
2020         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2021         if (fw->sbss) {
2022                 int j;
2023
2024                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2025                         REG_WR_IND(bp, offset, fw->sbss[j]);
2026                 }
2027         }
2028
2029         /* Load the BSS area. */
2030         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2031         if (fw->bss) {
2032                 int j;
2033
2034                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2035                         REG_WR_IND(bp, offset, fw->bss[j]);
2036                 }
2037         }
2038
2039         /* Load the Read-Only area. */
2040         offset = cpu_reg->spad_base +
2041                 (fw->rodata_addr - cpu_reg->mips_view_base);
2042         if (fw->rodata) {
2043                 int j;
2044
2045                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2046                         REG_WR_IND(bp, offset, fw->rodata[j]);
2047                 }
2048         }
2049
2050         /* Clear the pre-fetch instruction. */
2051         REG_WR_IND(bp, cpu_reg->inst, 0);
2052         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2053
2054         /* Start the CPU. */
2055         val = REG_RD_IND(bp, cpu_reg->mode);
2056         val &= ~cpu_reg->mode_value_halt;
2057         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2058         REG_WR_IND(bp, cpu_reg->mode, val);
2059 }
2060
2061 static void
2062 bnx2_init_cpus(struct bnx2 *bp)
2063 {
2064         struct cpu_reg cpu_reg;
2065         struct fw_info fw;
2066
2067         /* Initialize the RV2P processor. */
2068         load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2069         load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2070
2071         /* Initialize the RX Processor. */
2072         cpu_reg.mode = BNX2_RXP_CPU_MODE;
2073         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2074         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2075         cpu_reg.state = BNX2_RXP_CPU_STATE;
2076         cpu_reg.state_value_clear = 0xffffff;
2077         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2078         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2079         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2080         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2081         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2082         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2083         cpu_reg.mips_view_base = 0x8000000;
2084     
2085         fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2086         fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2087         fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2088         fw.start_addr = bnx2_RXP_b06FwStartAddr;
2089
2090         fw.text_addr = bnx2_RXP_b06FwTextAddr;
2091         fw.text_len = bnx2_RXP_b06FwTextLen;
2092         fw.text_index = 0;
2093         fw.text = bnx2_RXP_b06FwText;
2094
2095         fw.data_addr = bnx2_RXP_b06FwDataAddr;
2096         fw.data_len = bnx2_RXP_b06FwDataLen;
2097         fw.data_index = 0;
2098         fw.data = bnx2_RXP_b06FwData;
2099
2100         fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2101         fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2102         fw.sbss_index = 0;
2103         fw.sbss = bnx2_RXP_b06FwSbss;
2104
2105         fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2106         fw.bss_len = bnx2_RXP_b06FwBssLen;
2107         fw.bss_index = 0;
2108         fw.bss = bnx2_RXP_b06FwBss;
2109
2110         fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2111         fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2112         fw.rodata_index = 0;
2113         fw.rodata = bnx2_RXP_b06FwRodata;
2114
2115         load_cpu_fw(bp, &cpu_reg, &fw);
2116
2117         /* Initialize the TX Processor. */
2118         cpu_reg.mode = BNX2_TXP_CPU_MODE;
2119         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2120         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2121         cpu_reg.state = BNX2_TXP_CPU_STATE;
2122         cpu_reg.state_value_clear = 0xffffff;
2123         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2124         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2125         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2126         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2127         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2128         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2129         cpu_reg.mips_view_base = 0x8000000;
2130     
2131         fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2132         fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2133         fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2134         fw.start_addr = bnx2_TXP_b06FwStartAddr;
2135
2136         fw.text_addr = bnx2_TXP_b06FwTextAddr;
2137         fw.text_len = bnx2_TXP_b06FwTextLen;
2138         fw.text_index = 0;
2139         fw.text = bnx2_TXP_b06FwText;
2140
2141         fw.data_addr = bnx2_TXP_b06FwDataAddr;
2142         fw.data_len = bnx2_TXP_b06FwDataLen;
2143         fw.data_index = 0;
2144         fw.data = bnx2_TXP_b06FwData;
2145
2146         fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2147         fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2148         fw.sbss_index = 0;
2149         fw.sbss = bnx2_TXP_b06FwSbss;
2150
2151         fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2152         fw.bss_len = bnx2_TXP_b06FwBssLen;
2153         fw.bss_index = 0;
2154         fw.bss = bnx2_TXP_b06FwBss;
2155
2156         fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2157         fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2158         fw.rodata_index = 0;
2159         fw.rodata = bnx2_TXP_b06FwRodata;
2160
2161         load_cpu_fw(bp, &cpu_reg, &fw);
2162
2163         /* Initialize the TX Patch-up Processor. */
2164         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2165         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2166         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2167         cpu_reg.state = BNX2_TPAT_CPU_STATE;
2168         cpu_reg.state_value_clear = 0xffffff;
2169         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2170         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2171         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2172         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2173         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2174         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2175         cpu_reg.mips_view_base = 0x8000000;
2176     
2177         fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2178         fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2179         fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2180         fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2181
2182         fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2183         fw.text_len = bnx2_TPAT_b06FwTextLen;
2184         fw.text_index = 0;
2185         fw.text = bnx2_TPAT_b06FwText;
2186
2187         fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2188         fw.data_len = bnx2_TPAT_b06FwDataLen;
2189         fw.data_index = 0;
2190         fw.data = bnx2_TPAT_b06FwData;
2191
2192         fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2193         fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2194         fw.sbss_index = 0;
2195         fw.sbss = bnx2_TPAT_b06FwSbss;
2196
2197         fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2198         fw.bss_len = bnx2_TPAT_b06FwBssLen;
2199         fw.bss_index = 0;
2200         fw.bss = bnx2_TPAT_b06FwBss;
2201
2202         fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2203         fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2204         fw.rodata_index = 0;
2205         fw.rodata = bnx2_TPAT_b06FwRodata;
2206
2207         load_cpu_fw(bp, &cpu_reg, &fw);
2208
2209         /* Initialize the Completion Processor. */
2210         cpu_reg.mode = BNX2_COM_CPU_MODE;
2211         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2212         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2213         cpu_reg.state = BNX2_COM_CPU_STATE;
2214         cpu_reg.state_value_clear = 0xffffff;
2215         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2216         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2217         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2218         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2219         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2220         cpu_reg.spad_base = BNX2_COM_SCRATCH;
2221         cpu_reg.mips_view_base = 0x8000000;
2222     
2223         fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2224         fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2225         fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2226         fw.start_addr = bnx2_COM_b06FwStartAddr;
2227
2228         fw.text_addr = bnx2_COM_b06FwTextAddr;
2229         fw.text_len = bnx2_COM_b06FwTextLen;
2230         fw.text_index = 0;
2231         fw.text = bnx2_COM_b06FwText;
2232
2233         fw.data_addr = bnx2_COM_b06FwDataAddr;
2234         fw.data_len = bnx2_COM_b06FwDataLen;
2235         fw.data_index = 0;
2236         fw.data = bnx2_COM_b06FwData;
2237
2238         fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2239         fw.sbss_len = bnx2_COM_b06FwSbssLen;
2240         fw.sbss_index = 0;
2241         fw.sbss = bnx2_COM_b06FwSbss;
2242
2243         fw.bss_addr = bnx2_COM_b06FwBssAddr;
2244         fw.bss_len = bnx2_COM_b06FwBssLen;
2245         fw.bss_index = 0;
2246         fw.bss = bnx2_COM_b06FwBss;
2247
2248         fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2249         fw.rodata_len = bnx2_COM_b06FwRodataLen;
2250         fw.rodata_index = 0;
2251         fw.rodata = bnx2_COM_b06FwRodata;
2252
2253         load_cpu_fw(bp, &cpu_reg, &fw);
2254
2255 }
2256
2257 static int
2258 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2259 {
2260         u16 pmcsr;
2261
2262         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2263
2264         switch (state) {
2265         case PCI_D0: {
2266                 u32 val;
2267
2268                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2269                         (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2270                         PCI_PM_CTRL_PME_STATUS);
2271
2272                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2273                         /* delay required during transition out of D3hot */
2274                         msleep(20);
2275
2276                 val = REG_RD(bp, BNX2_EMAC_MODE);
2277                 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2278                 val &= ~BNX2_EMAC_MODE_MPKT;
2279                 REG_WR(bp, BNX2_EMAC_MODE, val);
2280
2281                 val = REG_RD(bp, BNX2_RPM_CONFIG);
2282                 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2283                 REG_WR(bp, BNX2_RPM_CONFIG, val);
2284                 break;
2285         }
2286         case PCI_D3hot: {
2287                 int i;
2288                 u32 val, wol_msg;
2289
2290                 if (bp->wol) {
2291                         u32 advertising;
2292                         u8 autoneg;
2293
2294                         autoneg = bp->autoneg;
2295                         advertising = bp->advertising;
2296
2297                         bp->autoneg = AUTONEG_SPEED;
2298                         bp->advertising = ADVERTISED_10baseT_Half |
2299                                 ADVERTISED_10baseT_Full |
2300                                 ADVERTISED_100baseT_Half |
2301                                 ADVERTISED_100baseT_Full |
2302                                 ADVERTISED_Autoneg;
2303
2304                         bnx2_setup_copper_phy(bp);
2305
2306                         bp->autoneg = autoneg;
2307                         bp->advertising = advertising;
2308
2309                         bnx2_set_mac_addr(bp);
2310
2311                         val = REG_RD(bp, BNX2_EMAC_MODE);
2312
2313                         /* Enable port mode. */
2314                         val &= ~BNX2_EMAC_MODE_PORT;
2315                         val |= BNX2_EMAC_MODE_PORT_MII |
2316                                BNX2_EMAC_MODE_MPKT_RCVD |
2317                                BNX2_EMAC_MODE_ACPI_RCVD |
2318                                BNX2_EMAC_MODE_FORCE_LINK |
2319                                BNX2_EMAC_MODE_MPKT;
2320
2321                         REG_WR(bp, BNX2_EMAC_MODE, val);
2322
2323                         /* receive all multicast */
2324                         for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2325                                 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2326                                        0xffffffff);
2327                         }
2328                         REG_WR(bp, BNX2_EMAC_RX_MODE,
2329                                BNX2_EMAC_RX_MODE_SORT_MODE);
2330
2331                         val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2332                               BNX2_RPM_SORT_USER0_MC_EN;
2333                         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2334                         REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2335                         REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2336                                BNX2_RPM_SORT_USER0_ENA);
2337
2338                         /* Need to enable EMAC and RPM for WOL. */
2339                         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2340                                BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2341                                BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2342                                BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2343
2344                         val = REG_RD(bp, BNX2_RPM_CONFIG);
2345                         val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2346                         REG_WR(bp, BNX2_RPM_CONFIG, val);
2347
2348                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2349                 }
2350                 else {
2351                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2352                 }
2353
2354                 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg);
2355
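                     /* Select D3hot (state value 3) in PMCSR; on the 5706 A0/A1
                      * the state is only changed when WOL is enabled.
                      */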
2356                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2357                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2358                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2359
2360                         if (bp->wol)
2361                                 pmcsr |= 3;
2362                 }
2363                 else {
2364                         pmcsr |= 3;
2365                 }
2366                 if (bp->wol) {
2367                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2368                 }
2369                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2370                                       pmcsr);
2371
2372                 /* No more memory access after this point until
2373                  * device is brought back to D0.
2374                  */
2375                 udelay(50);
2376                 break;
2377         }
2378         default:
2379                 return -EINVAL;
2380         }
2381         return 0;
2382 }
2383
2384 static int
2385 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2386 {
2387         u32 val;
2388         int j;
2389
2390         /* Request access to the flash interface. */
2391         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2392         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2393                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2394                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2395                         break;
2396
2397                 udelay(5);
2398         }
2399
2400         if (j >= NVRAM_TIMEOUT_COUNT)
2401                 return -EBUSY;
2402
2403         return 0;
2404 }
2405
2406 static int
2407 bnx2_release_nvram_lock(struct bnx2 *bp)
2408 {
2409         int j;
2410         u32 val;
2411
2412         /* Relinquish nvram interface. */
2413         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2414
2415         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2416                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2417                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2418                         break;
2419
2420                 udelay(5);
2421         }
2422
2423         if (j >= NVRAM_TIMEOUT_COUNT)
2424                 return -EBUSY;
2425
2426         return 0;
2427 }
2428
2429
2430 static int
2431 bnx2_enable_nvram_write(struct bnx2 *bp)
2432 {
2433         u32 val;
2434
2435         val = REG_RD(bp, BNX2_MISC_CFG);
2436         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2437
2438         if (!bp->flash_info->buffered) {
2439                 int j;
2440
2441                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2442                 REG_WR(bp, BNX2_NVM_COMMAND,
2443                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2444
2445                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2446                         udelay(5);
2447
2448                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2449                         if (val & BNX2_NVM_COMMAND_DONE)
2450                                 break;
2451                 }
2452
2453                 if (j >= NVRAM_TIMEOUT_COUNT)
2454                         return -EBUSY;
2455         }
2456         return 0;
2457 }
2458
2459 static void
2460 bnx2_disable_nvram_write(struct bnx2 *bp)
2461 {
2462         u32 val;
2463
2464         val = REG_RD(bp, BNX2_MISC_CFG);
2465         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2466 }
2467
2468
2469 static void
2470 bnx2_enable_nvram_access(struct bnx2 *bp)
2471 {
2472         u32 val;
2473
2474         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2475         /* Enable both bits, even on read. */
2476         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2477                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2478 }
2479
2480 static void
2481 bnx2_disable_nvram_access(struct bnx2 *bp)
2482 {
2483         u32 val;
2484
2485         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2486         /* Disable both bits, even after read. */
2487         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2488                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2489                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2490 }
2491
2492 static int
2493 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2494 {
2495         u32 cmd;
2496         int j;
2497
2498         if (bp->flash_info->buffered)
2499                 /* Buffered flash, no erase needed */
2500                 return 0;
2501
2502         /* Build an erase command */
2503         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2504               BNX2_NVM_COMMAND_DOIT;
2505
2506         /* Need to clear DONE bit separately. */
2507         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2508
2509         /* Address of the NVRAM page to erase. */
2510         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2511
2512         /* Issue an erase command. */
2513         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2514
2515         /* Wait for completion. */
2516         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2517                 u32 val;
2518
2519                 udelay(5);
2520
2521                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2522                 if (val & BNX2_NVM_COMMAND_DONE)
2523                         break;
2524         }
2525
2526         if (j >= NVRAM_TIMEOUT_COUNT)
2527                 return -EBUSY;
2528
2529         return 0;
2530 }
2531
2532 static int
2533 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2534 {
2535         u32 cmd;
2536         int j;
2537
2538         /* Build the command word. */
2539         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2540
2541         /* Calculate the offset within a buffered flash (page number and byte offset). */
2542         if (bp->flash_info->buffered) {
2543                 offset = ((offset / bp->flash_info->page_size) <<
2544                            bp->flash_info->page_bits) +
2545                           (offset % bp->flash_info->page_size);
2546         }
2547
2548         /* Need to clear DONE bit separately. */
2549         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2550
2551         /* Address of the NVRAM to read from. */
2552         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2553
2554         /* Issue a read command. */
2555         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2556
2557         /* Wait for completion. */
2558         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2559                 u32 val;
2560
2561                 udelay(5);
2562
2563                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2564                 if (val & BNX2_NVM_COMMAND_DONE) {
2565                         val = REG_RD(bp, BNX2_NVM_READ);
2566
2567                         val = be32_to_cpu(val);
2568                         memcpy(ret_val, &val, 4);
2569                         break;
2570                 }
2571         }
2572         if (j >= NVRAM_TIMEOUT_COUNT)
2573                 return -EBUSY;
2574
2575         return 0;
2576 }
2577
2578
2579 static int
2580 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2581 {
2582         u32 cmd, val32;
2583         int j;
2584
2585         /* Build the command word. */
2586         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2587
2588         /* Calculate the offset within a buffered flash (page number and byte offset). */
2589         if (bp->flash_info->buffered) {
2590                 offset = ((offset / bp->flash_info->page_size) <<
2591                           bp->flash_info->page_bits) +
2592                          (offset % bp->flash_info->page_size);
2593         }
2594
2595         /* Need to clear DONE bit separately. */
2596         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2597
2598         memcpy(&val32, val, 4);
2599         val32 = cpu_to_be32(val32);
2600
2601         /* Write the data. */
2602         REG_WR(bp, BNX2_NVM_WRITE, val32);
2603
2604         /* Address of the NVRAM to write to. */
2605         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2606
2607         /* Issue the write command. */
2608         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2609
2610         /* Wait for completion. */
2611         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2612                 udelay(5);
2613
2614                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2615                         break;
2616         }
2617         if (j >= NVRAM_TIMEOUT_COUNT)
2618                 return -EBUSY;
2619
2620         return 0;
2621 }
2622
2623 static int
2624 bnx2_init_nvram(struct bnx2 *bp)
2625 {
2626         u32 val;
2627         int j, entry_count, rc;
2628         struct flash_spec *flash;
2629
2630         /* Determine the selected interface. */
2631         val = REG_RD(bp, BNX2_NVM_CFG1);
2632
2633         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2634
2635         rc = 0;
2636         if (val & 0x40000000) {
2637
2638                 /* Flash interface has been reconfigured */
2639                 for (j = 0, flash = &flash_table[0]; j < entry_count;
2640                      j++, flash++) {
2641                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
2642                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2643                                 bp->flash_info = flash;
2644                                 break;
2645                         }
2646                 }
2647         }
2648         else {
2649                 u32 mask;
2650                 /* Flash interface has not been reconfigured yet */
2651
2652                 if (val & (1 << 23))
2653                         mask = FLASH_BACKUP_STRAP_MASK;
2654                 else
2655                         mask = FLASH_STRAP_MASK;
2656
2657                 for (j = 0, flash = &flash_table[0]; j < entry_count;
2658                         j++, flash++) {
2659
2660                         if ((val & mask) == (flash->strapping & mask)) {
2661                                 bp->flash_info = flash;
2662
2663                                 /* Request access to the flash interface. */
2664                                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2665                                         return rc;
2666
2667                                 /* Enable access to flash interface */
2668                                 bnx2_enable_nvram_access(bp);
2669
2670                                 /* Reconfigure the flash interface */
2671                                 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2672                                 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2673                                 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2674                                 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2675
2676                                 /* Disable access to flash interface */
2677                                 bnx2_disable_nvram_access(bp);
2678                                 bnx2_release_nvram_lock(bp);
2679
2680                                 break;
2681                         }
2682                 }
2683         } /* if (val & 0x40000000) */
2684
2685         if (j == entry_count) {
2686                 bp->flash_info = NULL;
2687                 printk(KERN_ALERT "Unknown flash/EEPROM type.\n");
2688                 rc = -ENODEV;
2689         }
2690
2691         return rc;
2692 }
2693
2694 static int
2695 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2696                 int buf_size)
2697 {
2698         int rc = 0;
2699         u32 cmd_flags, offset32, len32, extra;
2700
2701         if (buf_size == 0)
2702                 return 0;
2703
2704         /* Request access to the flash interface. */
2705         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2706                 return rc;
2707
2708         /* Enable access to flash interface */
2709         bnx2_enable_nvram_access(bp);
2710
2711         len32 = buf_size;
2712         offset32 = offset;
2713         extra = 0;
2714
2715         cmd_flags = 0;
2716
2717         if (offset32 & 3) {
2718                 u8 buf[4];
2719                 u32 pre_len;
2720
2721                 offset32 &= ~3;
2722                 pre_len = 4 - (offset & 3);
2723
2724                 if (pre_len >= len32) {
2725                         pre_len = len32;
2726                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2727                                     BNX2_NVM_COMMAND_LAST;
2728                 }
2729                 else {
2730                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2731                 }
2732
2733                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2734
2735                 if (rc)
2736                         return rc;
2737
2738                 memcpy(ret_buf, buf + (offset & 3), pre_len);
2739
2740                 offset32 += 4;
2741                 ret_buf += pre_len;
2742                 len32 -= pre_len;
2743         }
2744         if (len32 & 3) {
2745                 extra = 4 - (len32 & 3);
2746                 len32 = (len32 + 4) & ~3;
2747         }
2748
2749         if (len32 == 4) {
2750                 u8 buf[4];
2751
2752                 if (cmd_flags)
2753                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2754                 else
2755                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2756                                     BNX2_NVM_COMMAND_LAST;
2757
2758                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2759
2760                 memcpy(ret_buf, buf, 4 - extra);
2761         }
2762         else if (len32 > 0) {
2763                 u8 buf[4];
2764
2765                 /* Read the first word. */
2766                 if (cmd_flags)
2767                         cmd_flags = 0;
2768                 else
2769                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2770
2771                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2772
2773                 /* Advance to the next dword. */
2774                 offset32 += 4;
2775                 ret_buf += 4;
2776                 len32 -= 4;
2777
2778                 while (len32 > 4 && rc == 0) {
2779                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2780
2781                         /* Advance to the next dword. */
2782                         offset32 += 4;
2783                         ret_buf += 4;
2784                         len32 -= 4;
2785                 }
2786
2787                 if (rc)
2788                         return rc;
2789
2790                 cmd_flags = BNX2_NVM_COMMAND_LAST;
2791                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2792
2793                 memcpy(ret_buf, buf, 4 - extra);
2794         }
2795
2796         /* Disable access to flash interface */
2797         bnx2_disable_nvram_access(bp);
2798
2799         bnx2_release_nvram_lock(bp);
2800
2801         return rc;
2802 }
2803
2804 static int
2805 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2806                 int buf_size)
2807 {
2808         u32 written, offset32, len32;
2809         u8 *buf, start[4], end[4];
2810         int rc = 0;
2811         int align_start, align_end;
2812
2813         buf = data_buf;
2814         offset32 = offset;
2815         len32 = buf_size;
2816         align_start = align_end = 0;
2817
2818         if ((align_start = (offset32 & 3))) {
2819                 offset32 &= ~3;
2820                 len32 += align_start;
2821                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2822                         return rc;
2823         }
2824
2825         if (len32 & 3) {
2826                 if ((len32 > 4) || !align_start) {
2827                         align_end = 4 - (len32 & 3);
2828                         len32 += align_end;
2829                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2830                                 end, 4))) {
2831                                 return rc;
2832                         }
2833                 }
2834         }
2835
2836         if (align_start || align_end) {
2837                 buf = kmalloc(len32, GFP_KERNEL);
2838                 if (buf == NULL)
2839                         return -ENOMEM;
2840                 if (align_start) {
2841                         memcpy(buf, start, 4);
2842                 }
2843                 if (align_end) {
2844                         memcpy(buf + len32 - 4, end, 4);
2845                 }
2846                 memcpy(buf + align_start, data_buf, buf_size);
2847         }
2848
2849         written = 0;
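             /* Program one flash page per iteration.  Non-buffered parts need a
              * full read / erase / rewrite cycle; buffered parts are written
              * directly.
              */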
2850         while ((written < len32) && (rc == 0)) {
2851                 u32 page_start, page_end, data_start, data_end;
2852                 u32 addr, cmd_flags;
2853                 int i;
2854                 u8 flash_buffer[264];
2855
2856                 /* Find the page_start addr */
2857                 page_start = offset32 + written;
2858                 page_start -= (page_start % bp->flash_info->page_size);
2859                 /* Find the page_end addr */
2860                 page_end = page_start + bp->flash_info->page_size;
2861                 /* Find the data_start addr */
2862                 data_start = (written == 0) ? offset32 : page_start;
2863                 /* Find the data_end addr */
2864                 data_end = (page_end > offset32 + len32) ? 
2865                         (offset32 + len32) : page_end;
2866
2867                 /* Request access to the flash interface. */
2868                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2869                         goto nvram_write_end;
2870
2871                 /* Enable access to flash interface */
2872                 bnx2_enable_nvram_access(bp);
2873
2874                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2875                 if (bp->flash_info->buffered == 0) {
2876                         int j;
2877
2878                         /* Read the whole page into the buffer
2879                          * (non-buffered flash only) */
2880                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
2881                                 if (j == (bp->flash_info->page_size - 4)) {
2882                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
2883                                 }
2884                                 rc = bnx2_nvram_read_dword(bp,
2885                                         page_start + j, 
2886                                         &flash_buffer[j], 
2887                                         cmd_flags);
2888
2889                                 if (rc)
2890                                         goto nvram_write_end;
2891
2892                                 cmd_flags = 0;
2893                         }
2894                 }
2895
2896                 /* Enable writes to flash interface (unlock write-protect) */
2897                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2898                         goto nvram_write_end;
2899
2900                 /* Erase the page */
2901                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
2902                         goto nvram_write_end;
2903
2904                 /* Re-enable writes for the actual data write */
2905                 bnx2_enable_nvram_write(bp);
2906
2907                 /* Loop to write back the buffer data from page_start to
2908                  * data_start */
2909                 i = 0;
2910                 if (bp->flash_info->buffered == 0) {
2911                         for (addr = page_start; addr < data_start;
2912                                 addr += 4, i += 4) {
2913                                 
2914                                 rc = bnx2_nvram_write_dword(bp, addr,
2915                                         &flash_buffer[i], cmd_flags);
2916
2917                                 if (rc != 0)
2918                                         goto nvram_write_end;
2919
2920                                 cmd_flags = 0;
2921                         }
2922                 }
2923
2924                 /* Loop to write the new data from data_start to data_end */
2925                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
2926                         if ((addr == page_end - 4) ||
2927                                 ((bp->flash_info->buffered) &&
2928                                  (addr == data_end - 4))) {
2929
2930                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2931                         }
2932                         rc = bnx2_nvram_write_dword(bp, addr, buf,
2933                                 cmd_flags);
2934
2935                         if (rc != 0)
2936                                 goto nvram_write_end;
2937
2938                         cmd_flags = 0;
2939                         buf += 4;
2940                 }
2941
2942                 /* Loop to write back the buffer data from data_end
2943                  * to page_end */
2944                 if (bp->flash_info->buffered == 0) {
2945                         for (addr = data_end; addr < page_end;
2946                                 addr += 4, i += 4) {
2947                         
2948                                 if (addr == page_end-4) {
2949                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2950                                 }
2951                                 rc = bnx2_nvram_write_dword(bp, addr,
2952                                         &flash_buffer[i], cmd_flags);
2953
2954                                 if (rc != 0)
2955                                         goto nvram_write_end;
2956
2957                                 cmd_flags = 0;
2958                         }
2959                 }
2960
2961                 /* Disable writes to flash interface (lock write-protect) */
2962                 bnx2_disable_nvram_write(bp);
2963
2964                 /* Disable access to flash interface */
2965                 bnx2_disable_nvram_access(bp);
2966                 bnx2_release_nvram_lock(bp);
2967
2968                 /* Increment written */
2969                 written += data_end - data_start;
2970         }
2971
2972 nvram_write_end:
2973         if (align_start || align_end)
2974                 kfree(buf);
2975         return rc;
2976 }
2977
2978 static int
2979 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
2980 {
2981         u32 val;
2982         int i, rc = 0;
2983
2984         /* Wait for the current PCI transaction to complete before
2985          * issuing a reset. */
2986         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
2987                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2988                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2989                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2990                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2991         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
2992         udelay(5);
2993
2994         /* Deposit a driver reset signature so the firmware knows that
2995          * this is a soft reset. */
2996         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
2997                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
2998
2999         bp->fw_timed_out = 0;
3000
3001         /* Wait for the firmware to tell us it is ok to issue a reset. */
3002         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code);
3003
3004         /* Do a dummy read to force the chip to complete all current transactions
3005          * before we issue a reset. */
3006         val = REG_RD(bp, BNX2_MISC_ID);
3007
3008         val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3009               BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3010               BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3011
3012         /* Chip reset. */
3013         REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3014
3015         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3016             (CHIP_ID(bp) == CHIP_ID_5706_A1))
3017                 msleep(15);
3018
3019         /* Reset takes approximately 30 usec */
3020         for (i = 0; i < 10; i++) {
3021                 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3022                 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3023                             BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3024                         break;
3025                 }
3026                 udelay(10);
3027         }
3028
3029         if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3030                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3031                 printk(KERN_ERR PFX "Chip reset did not complete\n");
3032                 return -EBUSY;
3033         }
3034
3035         /* Make sure byte swapping is properly configured. */
3036         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3037         if (val != 0x01020304) {
3038                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3039                 return -ENODEV;
3040         }
3041
3042         bp->fw_timed_out = 0;
3043
3044         /* Wait for the firmware to finish its initialization. */
3045         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code);
3046
3047         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3048                 /* Adjust the voltage regulator to two steps lower.  The default
3049                  * of this register is 0x0000000e. */
3050                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3051
3052                 /* Remove bad rbuf memory from the free pool. */
3053                 rc = bnx2_alloc_bad_rbuf(bp);
3054         }
3055
3056         return rc;
3057 }
3058
3059 static int
3060 bnx2_init_chip(struct bnx2 *bp)
3061 {
3062         u32 val;
3063
3064         /* Make sure the interrupt is not active. */
3065         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3066
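              /* Build the DMA configuration word: data byte/word swapping for
               * the DMA engines (plus control-word byte swapping on big-endian
               * hosts), with the number of read and write DMA channels encoded
               * in bits 12 and 16.  Bit 23 is set below only when the device
               * sits on a 133 MHz PCI-X bus. */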
3067         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3068               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3069 #ifdef __BIG_ENDIAN
3070               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
3071 #endif
3072               BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
3073               DMA_READ_CHANS << 12 |
3074               DMA_WRITE_CHANS << 16;
3075
3076         val |= (0x2 << 20) | (1 << 11);
3077
3078         if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3079                 val |= (1 << 23);
3080
3081         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3082             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3083                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3084
3085         REG_WR(bp, BNX2_DMA_CONFIG, val);
3086
3087         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3088                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3089                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3090                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3091         }
3092
3093         if (bp->flags & PCIX_FLAG) {
3094                 u16 val16;
3095
3096                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3097                                      &val16);
3098                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3099                                       val16 & ~PCI_X_CMD_ERO);
3100         }
3101
3102         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3103                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3104                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3105                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3106
3107         /* Initialize context mapping and zero out the quick contexts.  The
3108          * context block must have already been enabled. */
3109         bnx2_init_context(bp);
3110
3111         bnx2_init_cpus(bp);
3112         bnx2_init_nvram(bp);
3113
3114         bnx2_set_mac_addr(bp);
3115
3116         val = REG_RD(bp, BNX2_MQ_CONFIG);
3117         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3118         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3119         REG_WR(bp, BNX2_MQ_CONFIG, val);
3120
3121         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3122         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3123         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3124
3125         val = (BCM_PAGE_BITS - 8) << 24;
3126         REG_WR(bp, BNX2_RV2P_CONFIG, val);
3127
3128         /* Configure page size. */
3129         val = REG_RD(bp, BNX2_TBDR_CONFIG);
3130         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3131         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3132         REG_WR(bp, BNX2_TBDR_CONFIG, val);
3133
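              /* Derive a transmit backoff seed from the MAC address: bytes 0-2
               * and 3-5 are each packed into 24-bit values and summed, so ports
               * with different MAC addresses tend to get different seeds. */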
3134         val = bp->mac_addr[0] +
3135               (bp->mac_addr[1] << 8) +
3136               (bp->mac_addr[2] << 16) +
3137               bp->mac_addr[3] +
3138               (bp->mac_addr[4] << 8) +
3139               (bp->mac_addr[5] << 16);
3140         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3141
3142         /* Program the MTU.  Also include 4 bytes for CRC32. */
3143         val = bp->dev->mtu + ETH_HLEN + 4;
3144         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3145                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3146         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3147
3148         bp->last_status_idx = 0;
3149         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3150
3151         /* Set up how to generate a link change interrupt. */
3152         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3153
3154         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3155                (u64) bp->status_blk_mapping & 0xffffffff);
3156         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3157
3158         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3159                (u64) bp->stats_blk_mapping & 0xffffffff);
3160         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3161                (u64) bp->stats_blk_mapping >> 32);
3162
3163         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
3164                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3165
3166         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3167                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3168
3169         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3170                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3171
3172         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3173
3174         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3175
3176         REG_WR(bp, BNX2_HC_COM_TICKS,
3177                (bp->com_ticks_int << 16) | bp->com_ticks);
3178
3179         REG_WR(bp, BNX2_HC_CMD_TICKS,
3180                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3181
3182         REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3183         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3184
3185         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3186                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3187         else {
3188                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3189                        BNX2_HC_CONFIG_TX_TMR_MODE |
3190                        BNX2_HC_CONFIG_COLLECT_STATS);
3191         }
3192
3193         /* Clear internal stats counters. */
3194         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3195
3196         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3197
3198         /* Initialize the receive filter. */
3199         bnx2_set_rx_mode(bp->dev);
3200
3201         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET);
3202
3203         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3204         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3205
3206         udelay(20);
3207
3208         return 0;
3209 }
3210
3211
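     /* Set up the transmit ring.  The last BD in the ring is used as a chain
      * pointer back to the ring base, and the ring's base address and type are
      * programmed into the TX L2 context so the chip can fetch the BDs. */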
3212 static void
3213 bnx2_init_tx_ring(struct bnx2 *bp)
3214 {
3215         struct tx_bd *txbd;
3216         u32 val;
3217
3218         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3219                 
3220         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3221         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3222
3223         bp->tx_prod = 0;
3224         bp->tx_cons = 0;
3225         bp->tx_prod_bseq = 0;
3226         
3227         val = BNX2_L2CTX_TYPE_TYPE_L2;
3228         val |= BNX2_L2CTX_TYPE_SIZE_L2;
3229         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3230
3231         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3232         val |= 8 << 16;
3233         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3234
3235         val = (u64) bp->tx_desc_mapping >> 32;
3236         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3237
3238         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3239         CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3240 }
3241
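     /* Set up the receive ring.  The buffer size includes room for the frame,
      * CRC, VLAN tag and alignment; every BD is pre-programmed with the buffer
      * length and START/END flags, the last BD chains back to the ring base,
      * and the initial producer index and byte sequence are posted to the
      * chip's mailbox registers. */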
3242 static void
3243 bnx2_init_rx_ring(struct bnx2 *bp)
3244 {
3245         struct rx_bd *rxbd;
3246         int i;
3247         u16 prod, ring_prod; 
3248         u32 val;
3249
3250         /* 8 for CRC and VLAN */
3251         bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3252         /* 8 for alignment */
3253         bp->rx_buf_size = bp->rx_buf_use_size + 8;
3254
3255         ring_prod = prod = bp->rx_prod = 0;
3256         bp->rx_cons = 0;
3257         bp->rx_prod_bseq = 0;
3258                 
3259         rxbd = &bp->rx_desc_ring[0];
3260         for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
3261                 rxbd->rx_bd_len = bp->rx_buf_use_size;
3262                 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3263         }
3264
3265         rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32;
3266         rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff;
3267
3268         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3269         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3270         val |= 0x02 << 8;
3271         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3272
3273         val = (u64) bp->rx_desc_mapping >> 32;
3274         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3275
3276         val = (u64) bp->rx_desc_mapping & 0xffffffff;
3277         CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3278
3279         for ( ;ring_prod < bp->rx_ring_size; ) {
3280                 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3281                         break;
3282                 }
3283                 prod = NEXT_RX_BD(prod);
3284                 ring_prod = RX_RING_IDX(prod);
3285         }
3286         bp->rx_prod = prod;
3287
3288         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3289
3290         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3291 }
3292
3293 static void
3294 bnx2_free_tx_skbs(struct bnx2 *bp)
3295 {
3296         int i;
3297
3298         if (bp->tx_buf_ring == NULL)
3299                 return;
3300
3301         for (i = 0; i < TX_DESC_CNT; ) {
3302                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3303                 struct sk_buff *skb = tx_buf->skb;
3304                 int j, last;
3305
3306                 if (skb == NULL) {
3307                         i++;
3308                         continue;
3309                 }
3310
3311                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3312                         skb_headlen(skb), PCI_DMA_TODEVICE);
3313
3314                 tx_buf->skb = NULL;
3315
3316                 last = skb_shinfo(skb)->nr_frags;
3317                 for (j = 0; j < last; j++) {
3318                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3319                         pci_unmap_page(bp->pdev,
3320                                 pci_unmap_addr(tx_buf, mapping),
3321                                 skb_shinfo(skb)->frags[j].size,
3322                                 PCI_DMA_TODEVICE);
3323                 }
3324                 dev_kfree_skb_any(skb);
3325                 i += j + 1;
3326         }
3327
3328 }
3329
3330 static void
3331 bnx2_free_rx_skbs(struct bnx2 *bp)
3332 {
3333         int i;
3334
3335         if (bp->rx_buf_ring == NULL)
3336                 return;
3337
3338         for (i = 0; i < RX_DESC_CNT; i++) {
3339                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3340                 struct sk_buff *skb = rx_buf->skb;
3341
3342                 if (skb == NULL)
3343                         continue;
3344
3345                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3346                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3347
3348                 rx_buf->skb = NULL;
3349
3350                 dev_kfree_skb_any(skb);
3351         }
3352 }
3353
3354 static void
3355 bnx2_free_skbs(struct bnx2 *bp)
3356 {
3357         bnx2_free_tx_skbs(bp);
3358         bnx2_free_rx_skbs(bp);
3359 }
3360
3361 static int
3362 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3363 {
3364         int rc;
3365
3366         rc = bnx2_reset_chip(bp, reset_code);
3367         bnx2_free_skbs(bp);
3368         if (rc)
3369                 return rc;
3370
3371         bnx2_init_chip(bp);
3372         bnx2_init_tx_ring(bp);
3373         bnx2_init_rx_ring(bp);
3374         return 0;
3375 }
3376
3377 static int
3378 bnx2_init_nic(struct bnx2 *bp)
3379 {
3380         int rc;
3381
3382         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3383                 return rc;
3384
3385         bnx2_init_phy(bp);
3386         bnx2_set_link(bp);
3387         return 0;
3388 }
3389
3390 static int
3391 bnx2_test_registers(struct bnx2 *bp)
3392 {
3393         int ret;
3394         int i;
3395         static struct {
3396                 u16   offset;
3397                 u16   flags;
3398                 u32   rw_mask;
3399                 u32   ro_mask;
3400         } reg_tbl[] = {
3401                 { 0x006c, 0, 0x00000000, 0x0000003f },
3402                 { 0x0090, 0, 0xffffffff, 0x00000000 },
3403                 { 0x0094, 0, 0x00000000, 0x00000000 },
3404
3405                 { 0x0404, 0, 0x00003f00, 0x00000000 },
3406                 { 0x0418, 0, 0x00000000, 0xffffffff },
3407                 { 0x041c, 0, 0x00000000, 0xffffffff },
3408                 { 0x0420, 0, 0x00000000, 0x80ffffff },
3409                 { 0x0424, 0, 0x00000000, 0x00000000 },
3410                 { 0x0428, 0, 0x00000000, 0x00000001 },
3411                 { 0x0450, 0, 0x00000000, 0x0000ffff },
3412                 { 0x0454, 0, 0x00000000, 0xffffffff },
3413                 { 0x0458, 0, 0x00000000, 0xffffffff },
3414
3415                 { 0x0808, 0, 0x00000000, 0xffffffff },
3416                 { 0x0854, 0, 0x00000000, 0xffffffff },
3417                 { 0x0868, 0, 0x00000000, 0x77777777 },
3418                 { 0x086c, 0, 0x00000000, 0x77777777 },
3419                 { 0x0870, 0, 0x00000000, 0x77777777 },
3420                 { 0x0874, 0, 0x00000000, 0x77777777 },
3421
3422                 { 0x0c00, 0, 0x00000000, 0x00000001 },
3423                 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3424                 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3425                 { 0x0c0c, 0, 0x00ffffff, 0x00000000 },
3426                 { 0x0c30, 0, 0x00000000, 0xffffffff },
3427                 { 0x0c34, 0, 0x00000000, 0xffffffff },
3428                 { 0x0c38, 0, 0x00000000, 0xffffffff },
3429                 { 0x0c3c, 0, 0x00000000, 0xffffffff },
3430                 { 0x0c40, 0, 0x00000000, 0xffffffff },
3431                 { 0x0c44, 0, 0x00000000, 0xffffffff },
3432                 { 0x0c48, 0, 0x00000000, 0x0007ffff },
3433                 { 0x0c4c, 0, 0x00000000, 0xffffffff },
3434                 { 0x0c50, 0, 0x00000000, 0xffffffff },
3435                 { 0x0c54, 0, 0x00000000, 0xffffffff },
3436                 { 0x0c58, 0, 0x00000000, 0xffffffff },
3437                 { 0x0c5c, 0, 0x00000000, 0xffffffff },
3438                 { 0x0c60, 0, 0x00000000, 0xffffffff },
3439                 { 0x0c64, 0, 0x00000000, 0xffffffff },
3440                 { 0x0c68, 0, 0x00000000, 0xffffffff },
3441                 { 0x0c6c, 0, 0x00000000, 0xffffffff },
3442                 { 0x0c70, 0, 0x00000000, 0xffffffff },
3443                 { 0x0c74, 0, 0x00000000, 0xffffffff },
3444                 { 0x0c78, 0, 0x00000000, 0xffffffff },
3445                 { 0x0c7c, 0, 0x00000000, 0xffffffff },
3446                 { 0x0c80, 0, 0x00000000, 0xffffffff },
3447                 { 0x0c84, 0, 0x00000000, 0xffffffff },
3448                 { 0x0c88, 0, 0x00000000, 0xffffffff },
3449                 { 0x0c8c, 0, 0x00000000, 0xffffffff },
3450                 { 0x0c90, 0, 0x00000000, 0xffffffff },
3451                 { 0x0c94, 0, 0x00000000, 0xffffffff },
3452                 { 0x0c98, 0, 0x00000000, 0xffffffff },
3453                 { 0x0c9c, 0, 0x00000000, 0xffffffff },
3454                 { 0x0ca0, 0, 0x00000000, 0xffffffff },
3455                 { 0x0ca4, 0, 0x00000000, 0xffffffff },
3456                 { 0x0ca8, 0, 0x00000000, 0x0007ffff },
3457                 { 0x0cac, 0, 0x00000000, 0xffffffff },
3458                 { 0x0cb0, 0, 0x00000000, 0xffffffff },
3459                 { 0x0cb4, 0, 0x00000000, 0xffffffff },
3460                 { 0x0cb8, 0, 0x00000000, 0xffffffff },
3461                 { 0x0cbc, 0, 0x00000000, 0xffffffff },
3462                 { 0x0cc0, 0, 0x00000000, 0xffffffff },
3463                 { 0x0cc4, 0, 0x00000000, 0xffffffff },
3464                 { 0x0cc8, 0, 0x00000000, 0xffffffff },
3465                 { 0x0ccc, 0, 0x00000000, 0xffffffff },
3466                 { 0x0cd0, 0, 0x00000000, 0xffffffff },
3467                 { 0x0cd4, 0, 0x00000000, 0xffffffff },
3468                 { 0x0cd8, 0, 0x00000000, 0xffffffff },
3469                 { 0x0cdc, 0, 0x00000000, 0xffffffff },
3470                 { 0x0ce0, 0, 0x00000000, 0xffffffff },
3471                 { 0x0ce4, 0, 0x00000000, 0xffffffff },
3472                 { 0x0ce8, 0, 0x00000000, 0xffffffff },
3473                 { 0x0cec, 0, 0x00000000, 0xffffffff },
3474                 { 0x0cf0, 0, 0x00000000, 0xffffffff },
3475                 { 0x0cf4, 0, 0x00000000, 0xffffffff },
3476                 { 0x0cf8, 0, 0x00000000, 0xffffffff },
3477                 { 0x0cfc, 0, 0x00000000, 0xffffffff },
3478                 { 0x0d00, 0, 0x00000000, 0xffffffff },
3479                 { 0x0d04, 0, 0x00000000, 0xffffffff },
3480
3481                 { 0x1000, 0, 0x00000000, 0x00000001 },
3482                 { 0x1004, 0, 0x00000000, 0x000f0001 },
3483                 { 0x1044, 0, 0x00000000, 0xffc003ff },
3484                 { 0x1080, 0, 0x00000000, 0x0001ffff },
3485                 { 0x1084, 0, 0x00000000, 0xffffffff },
3486                 { 0x1088, 0, 0x00000000, 0xffffffff },
3487                 { 0x108c, 0, 0x00000000, 0xffffffff },
3488                 { 0x1090, 0, 0x00000000, 0xffffffff },
3489                 { 0x1094, 0, 0x00000000, 0xffffffff },
3490                 { 0x1098, 0, 0x00000000, 0xffffffff },
3491                 { 0x109c, 0, 0x00000000, 0xffffffff },
3492                 { 0x10a0, 0, 0x00000000, 0xffffffff },
3493
3494                 { 0x1408, 0, 0x01c00800, 0x00000000 },
3495                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3496                 { 0x14a8, 0, 0x00000000, 0x000001ff },
3497                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3498                 { 0x14b0, 0, 0x00000002, 0x00000001 },
3499                 { 0x14b8, 0, 0x00000000, 0x00000000 },
3500                 { 0x14c0, 0, 0x00000000, 0x00000009 },
3501                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3502                 { 0x14cc, 0, 0x00000000, 0x00000001 },
3503                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3504                 { 0x1500, 0, 0x00000000, 0xffffffff },
3505                 { 0x1504, 0, 0x00000000, 0xffffffff },
3506                 { 0x1508, 0, 0x00000000, 0xffffffff },
3507                 { 0x150c, 0, 0x00000000, 0xffffffff },
3508                 { 0x1510, 0, 0x00000000, 0xffffffff },
3509                 { 0x1514, 0, 0x00000000, 0xffffffff },
3510                 { 0x1518, 0, 0x00000000, 0xffffffff },
3511                 { 0x151c, 0, 0x00000000, 0xffffffff },
3512                 { 0x1520, 0, 0x00000000, 0xffffffff },
3513                 { 0x1524, 0, 0x00000000, 0xffffffff },
3514                 { 0x1528, 0, 0x00000000, 0xffffffff },
3515                 { 0x152c, 0, 0x00000000, 0xffffffff },
3516                 { 0x1530, 0, 0x00000000, 0xffffffff },
3517                 { 0x1534, 0, 0x00000000, 0xffffffff },
3518                 { 0x1538, 0, 0x00000000, 0xffffffff },
3519                 { 0x153c, 0, 0x00000000, 0xffffffff },
3520                 { 0x1540, 0, 0x00000000, 0xffffffff },
3521                 { 0x1544, 0, 0x00000000, 0xffffffff },
3522                 { 0x1548, 0, 0x00000000, 0xffffffff },
3523                 { 0x154c, 0, 0x00000000, 0xffffffff },
3524                 { 0x1550, 0, 0x00000000, 0xffffffff },
3525                 { 0x1554, 0, 0x00000000, 0xffffffff },
3526                 { 0x1558, 0, 0x00000000, 0xffffffff },
3527                 { 0x1600, 0, 0x00000000, 0xffffffff },
3528                 { 0x1604, 0, 0x00000000, 0xffffffff },
3529                 { 0x1608, 0, 0x00000000, 0xffffffff },
3530                 { 0x160c, 0, 0x00000000, 0xffffffff },
3531                 { 0x1610, 0, 0x00000000, 0xffffffff },
3532                 { 0x1614, 0, 0x00000000, 0xffffffff },
3533                 { 0x1618, 0, 0x00000000, 0xffffffff },
3534                 { 0x161c, 0, 0x00000000, 0xffffffff },
3535                 { 0x1620, 0, 0x00000000, 0xffffffff },
3536                 { 0x1624, 0, 0x00000000, 0xffffffff },
3537                 { 0x1628, 0, 0x00000000, 0xffffffff },
3538                 { 0x162c, 0, 0x00000000, 0xffffffff },
3539                 { 0x1630, 0, 0x00000000, 0xffffffff },
3540                 { 0x1634, 0, 0x00000000, 0xffffffff },
3541                 { 0x1638, 0, 0x00000000, 0xffffffff },
3542                 { 0x163c, 0, 0x00000000, 0xffffffff },
3543                 { 0x1640, 0, 0x00000000, 0xffffffff },
3544                 { 0x1644, 0, 0x00000000, 0xffffffff },
3545                 { 0x1648, 0, 0x00000000, 0xffffffff },
3546                 { 0x164c, 0, 0x00000000, 0xffffffff },
3547                 { 0x1650, 0, 0x00000000, 0xffffffff },
3548                 { 0x1654, 0, 0x00000000, 0xffffffff },
3549
3550                 { 0x1800, 0, 0x00000000, 0x00000001 },
3551                 { 0x1804, 0, 0x00000000, 0x00000003 },
3552                 { 0x1840, 0, 0x00000000, 0xffffffff },
3553                 { 0x1844, 0, 0x00000000, 0xffffffff },
3554                 { 0x1848, 0, 0x00000000, 0xffffffff },
3555                 { 0x184c, 0, 0x00000000, 0xffffffff },
3556                 { 0x1850, 0, 0x00000000, 0xffffffff },
3557                 { 0x1900, 0, 0x7ffbffff, 0x00000000 },
3558                 { 0x1904, 0, 0xffffffff, 0x00000000 },
3559                 { 0x190c, 0, 0xffffffff, 0x00000000 },
3560                 { 0x1914, 0, 0xffffffff, 0x00000000 },
3561                 { 0x191c, 0, 0xffffffff, 0x00000000 },
3562                 { 0x1924, 0, 0xffffffff, 0x00000000 },
3563                 { 0x192c, 0, 0xffffffff, 0x00000000 },
3564                 { 0x1934, 0, 0xffffffff, 0x00000000 },
3565                 { 0x193c, 0, 0xffffffff, 0x00000000 },
3566                 { 0x1944, 0, 0xffffffff, 0x00000000 },
3567                 { 0x194c, 0, 0xffffffff, 0x00000000 },
3568                 { 0x1954, 0, 0xffffffff, 0x00000000 },
3569                 { 0x195c, 0, 0xffffffff, 0x00000000 },
3570                 { 0x1964, 0, 0xffffffff, 0x00000000 },
3571                 { 0x196c, 0, 0xffffffff, 0x00000000 },
3572                 { 0x1974, 0, 0xffffffff, 0x00000000 },
3573                 { 0x197c, 0, 0xffffffff, 0x00000000 },
3574                 { 0x1980, 0, 0x0700ffff, 0x00000000 },
3575
3576                 { 0x1c00, 0, 0x00000000, 0x00000001 },
3577                 { 0x1c04, 0, 0x00000000, 0x00000003 },
3578                 { 0x1c08, 0, 0x0000000f, 0x00000000 },
3579                 { 0x1c40, 0, 0x00000000, 0xffffffff },
3580                 { 0x1c44, 0, 0x00000000, 0xffffffff },
3581                 { 0x1c48, 0, 0x00000000, 0xffffffff },
3582                 { 0x1c4c, 0, 0x00000000, 0xffffffff },
3583                 { 0x1c50, 0, 0x00000000, 0xffffffff },
3584                 { 0x1d00, 0, 0x7ffbffff, 0x00000000 },
3585                 { 0x1d04, 0, 0xffffffff, 0x00000000 },
3586                 { 0x1d0c, 0, 0xffffffff, 0x00000000 },
3587                 { 0x1d14, 0, 0xffffffff, 0x00000000 },
3588                 { 0x1d1c, 0, 0xffffffff, 0x00000000 },
3589                 { 0x1d24, 0, 0xffffffff, 0x00000000 },
3590                 { 0x1d2c, 0, 0xffffffff, 0x00000000 },
3591                 { 0x1d34, 0, 0xffffffff, 0x00000000 },
3592                 { 0x1d3c, 0, 0xffffffff, 0x00000000 },
3593                 { 0x1d44, 0, 0xffffffff, 0x00000000 },
3594                 { 0x1d4c, 0, 0xffffffff, 0x00000000 },
3595                 { 0x1d54, 0, 0xffffffff, 0x00000000 },
3596                 { 0x1d5c, 0, 0xffffffff, 0x00000000 },
3597                 { 0x1d64, 0, 0xffffffff, 0x00000000 },
3598                 { 0x1d6c, 0, 0xffffffff, 0x00000000 },
3599                 { 0x1d74, 0, 0xffffffff, 0x00000000 },
3600                 { 0x1d7c, 0, 0xffffffff, 0x00000000 },
3601                 { 0x1d80, 0, 0x0700ffff, 0x00000000 },
3602
3603                 { 0x2004, 0, 0x00000000, 0x0337000f },
3604                 { 0x2008, 0, 0xffffffff, 0x00000000 },
3605                 { 0x200c, 0, 0xffffffff, 0x00000000 },
3606                 { 0x2010, 0, 0xffffffff, 0x00000000 },
3607                 { 0x2014, 0, 0x801fff80, 0x00000000 },
3608                 { 0x2018, 0, 0x000003ff, 0x00000000 },
3609
3610                 { 0x2800, 0, 0x00000000, 0x00000001 },
3611                 { 0x2804, 0, 0x00000000, 0x00003f01 },
3612                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3613                 { 0x2810, 0, 0xffff0000, 0x00000000 },
3614                 { 0x2814, 0, 0xffff0000, 0x00000000 },
3615                 { 0x2818, 0, 0xffff0000, 0x00000000 },
3616                 { 0x281c, 0, 0xffff0000, 0x00000000 },
3617                 { 0x2834, 0, 0xffffffff, 0x00000000 },
3618                 { 0x2840, 0, 0x00000000, 0xffffffff },
3619                 { 0x2844, 0, 0x00000000, 0xffffffff },
3620                 { 0x2848, 0, 0xffffffff, 0x00000000 },
3621                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3622
3623                 { 0x2c00, 0, 0x00000000, 0x00000011 },
3624                 { 0x2c04, 0, 0x00000000, 0x00030007 },
3625
3626                 { 0x3000, 0, 0x00000000, 0x00000001 },
3627                 { 0x3004, 0, 0x00000000, 0x007007ff },
3628                 { 0x3008, 0, 0x00000003, 0x00000000 },
3629                 { 0x300c, 0, 0xffffffff, 0x00000000 },
3630                 { 0x3010, 0, 0xffffffff, 0x00000000 },
3631                 { 0x3014, 0, 0xffffffff, 0x00000000 },
3632                 { 0x3034, 0, 0xffffffff, 0x00000000 },
3633                 { 0x3038, 0, 0xffffffff, 0x00000000 },
3634                 { 0x3050, 0, 0x00000001, 0x00000000 },
3635
3636                 { 0x3c00, 0, 0x00000000, 0x00000001 },
3637                 { 0x3c04, 0, 0x00000000, 0x00070000 },
3638                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3639                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3640                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3641                 { 0x3c14, 0, 0x00000000, 0xffffffff },
3642                 { 0x3c18, 0, 0x00000000, 0xffffffff },
3643                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3644                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3645                 { 0x3c24, 0, 0xffffffff, 0x00000000 },
3646                 { 0x3c28, 0, 0xffffffff, 0x00000000 },
3647                 { 0x3c2c, 0, 0xffffffff, 0x00000000 },
3648                 { 0x3c30, 0, 0xffffffff, 0x00000000 },
3649                 { 0x3c34, 0, 0xffffffff, 0x00000000 },
3650                 { 0x3c38, 0, 0xffffffff, 0x00000000 },
3651                 { 0x3c3c, 0, 0xffffffff, 0x00000000 },
3652                 { 0x3c40, 0, 0xffffffff, 0x00000000 },
3653                 { 0x3c44, 0, 0xffffffff, 0x00000000 },
3654                 { 0x3c48, 0, 0xffffffff, 0x00000000 },
3655                 { 0x3c4c, 0, 0xffffffff, 0x00000000 },
3656                 { 0x3c50, 0, 0xffffffff, 0x00000000 },
3657                 { 0x3c54, 0, 0xffffffff, 0x00000000 },
3658                 { 0x3c58, 0, 0xffffffff, 0x00000000 },
3659                 { 0x3c5c, 0, 0xffffffff, 0x00000000 },
3660                 { 0x3c60, 0, 0xffffffff, 0x00000000 },
3661                 { 0x3c64, 0, 0xffffffff, 0x00000000 },
3662                 { 0x3c68, 0, 0xffffffff, 0x00000000 },
3663                 { 0x3c6c, 0, 0xffffffff, 0x00000000 },
3664                 { 0x3c70, 0, 0xffffffff, 0x00000000 },
3665                 { 0x3c74, 0, 0x0000003f, 0x00000000 },
3666                 { 0x3c78, 0, 0x00000000, 0x00000000 },
3667                 { 0x3c7c, 0, 0x00000000, 0x00000000 },
3668                 { 0x3c80, 0, 0x3fffffff, 0x00000000 },
3669                 { 0x3c84, 0, 0x0000003f, 0x00000000 },
3670                 { 0x3c88, 0, 0x00000000, 0xffffffff },
3671                 { 0x3c8c, 0, 0x00000000, 0xffffffff },
3672
3673                 { 0x4000, 0, 0x00000000, 0x00000001 },
3674                 { 0x4004, 0, 0x00000000, 0x00030000 },
3675                 { 0x4008, 0, 0x00000ff0, 0x00000000 },
3676                 { 0x400c, 0, 0xffffffff, 0x00000000 },
3677                 { 0x4088, 0, 0x00000000, 0x00070303 },
3678
3679                 { 0x4400, 0, 0x00000000, 0x00000001 },
3680                 { 0x4404, 0, 0x00000000, 0x00003f01 },
3681                 { 0x4408, 0, 0x7fff00ff, 0x00000000 },
3682                 { 0x440c, 0, 0xffffffff, 0x00000000 },
3683                 { 0x4410, 0, 0xffff,     0x0000 },
3684                 { 0x4414, 0, 0xffff,     0x0000 },
3685                 { 0x4418, 0, 0xffff,     0x0000 },
3686                 { 0x441c, 0, 0xffff,     0x0000 },
3687                 { 0x4428, 0, 0xffffffff, 0x00000000 },
3688                 { 0x442c, 0, 0xffffffff, 0x00000000 },
3689                 { 0x4430, 0, 0xffffffff, 0x00000000 },
3690                 { 0x4434, 0, 0xffffffff, 0x00000000 },
3691                 { 0x4438, 0, 0xffffffff, 0x00000000 },
3692                 { 0x443c, 0, 0xffffffff, 0x00000000 },
3693                 { 0x4440, 0, 0xffffffff, 0x00000000 },
3694                 { 0x4444, 0, 0xffffffff, 0x00000000 },
3695
3696                 { 0x4c00, 0, 0x00000000, 0x00000001 },
3697                 { 0x4c04, 0, 0x00000000, 0x0000003f },
3698                 { 0x4c08, 0, 0xffffffff, 0x00000000 },
3699                 { 0x4c0c, 0, 0x0007fc00, 0x00000000 },
3700                 { 0x4c10, 0, 0x80003fe0, 0x00000000 },
3701                 { 0x4c14, 0, 0xffffffff, 0x00000000 },
3702                 { 0x4c44, 0, 0x00000000, 0x9fff9fff },
3703                 { 0x4c48, 0, 0x00000000, 0xb3009fff },
3704                 { 0x4c4c, 0, 0x00000000, 0x77f33b30 },
3705                 { 0x4c50, 0, 0x00000000, 0xffffffff },
3706
3707                 { 0x5004, 0, 0x00000000, 0x0000007f },
3708                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3709                 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3710
3711                 { 0x5400, 0, 0x00000008, 0x00000001 },
3712                 { 0x5404, 0, 0x00000000, 0x0000003f },
3713                 { 0x5408, 0, 0x0000001f, 0x00000000 },
3714                 { 0x540c, 0, 0xffffffff, 0x00000000 },
3715                 { 0x5410, 0, 0xffffffff, 0x00000000 },
3716                 { 0x5414, 0, 0x0000ffff, 0x00000000 },
3717                 { 0x5418, 0, 0x0000ffff, 0x00000000 },
3718                 { 0x541c, 0, 0x0000ffff, 0x00000000 },
3719                 { 0x5420, 0, 0x0000ffff, 0x00000000 },
3720                 { 0x5428, 0, 0x000000ff, 0x00000000 },
3721                 { 0x542c, 0, 0xff00ffff, 0x00000000 },
3722                 { 0x5430, 0, 0x001fff80, 0x00000000 },
3723                 { 0x5438, 0, 0xffffffff, 0x00000000 },
3724                 { 0x543c, 0, 0xffffffff, 0x00000000 },
3725                 { 0x5440, 0, 0xf800f800, 0x07ff07ff },
3726
3727                 { 0x5c00, 0, 0x00000000, 0x00000001 },
3728                 { 0x5c04, 0, 0x00000000, 0x0003000f },
3729                 { 0x5c08, 0, 0x00000003, 0x00000000 },
3730                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3731                 { 0x5c10, 0, 0x00000000, 0xffffffff },
3732                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3733                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3734                 { 0x5c88, 0, 0x00000000, 0x00077373 },
3735                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3736
3737                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3738                 { 0x680c, 0, 0xffffffff, 0x00000000 },
3739                 { 0x6810, 0, 0xffffffff, 0x00000000 },
3740                 { 0x6814, 0, 0xffffffff, 0x00000000 },
3741                 { 0x6818, 0, 0xffffffff, 0x00000000 },
3742                 { 0x681c, 0, 0xffffffff, 0x00000000 },
3743                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3744                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3745                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3746                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3747                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3748                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3749                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3750                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3751                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3752                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3753                 { 0x684c, 0, 0xffffffff, 0x00000000 },
3754                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3755                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3756                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3757                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3758                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3759                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3760
3761                 { 0xffff, 0, 0x00000000, 0x00000000 },
3762         };
3763
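              /* For each register in the table: save the current value, write
               * all zeros and then all ones, and check that read/write bits
               * (rw_mask) take the written value while read-only bits (ro_mask)
               * keep their saved value.  The saved value is restored afterwards. */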
3764         ret = 0;
3765         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3766                 u32 offset, rw_mask, ro_mask, save_val, val;
3767
3768                 offset = (u32) reg_tbl[i].offset;
3769                 rw_mask = reg_tbl[i].rw_mask;
3770                 ro_mask = reg_tbl[i].ro_mask;
3771
3772                 save_val = readl(bp->regview + offset);
3773
3774                 writel(0, bp->regview + offset);
3775
3776                 val = readl(bp->regview + offset);
3777                 if ((val & rw_mask) != 0) {
3778                         goto reg_test_err;
3779                 }
3780
3781                 if ((val & ro_mask) != (save_val & ro_mask)) {
3782                         goto reg_test_err;
3783                 }
3784
3785                 writel(0xffffffff, bp->regview + offset);
3786
3787                 val = readl(bp->regview + offset);
3788                 if ((val & rw_mask) != rw_mask) {
3789                         goto reg_test_err;
3790                 }
3791
3792                 if ((val & ro_mask) != (save_val & ro_mask)) {
3793                         goto reg_test_err;
3794                 }
3795
3796                 writel(save_val, bp->regview + offset);
3797                 continue;
3798
3799 reg_test_err:
3800                 writel(save_val, bp->regview + offset);
3801                 ret = -ENODEV;
3802                 break;
3803         }
3804         return ret;
3805 }
3806
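     /* Write each test pattern to every dword of the given on-chip memory
      * window through the indirect register interface and read it back to
      * verify that the memory holds the value. */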
3807 static int
3808 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3809 {
3810         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3811                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3812         int i;
3813
3814         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3815                 u32 offset;
3816
3817                 for (offset = 0; offset < size; offset += 4) {
3818
3819                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3820
3821                         if (REG_RD_IND(bp, start + offset) !=
3822                                 test_pattern[i]) {
3823                                 return -ENODEV;
3824                         }
3825                 }
3826         }
3827         return 0;
3828 }
3829
3830 static int
3831 bnx2_test_memory(struct bnx2 *bp)
3832 {
3833         int ret = 0;
3834         int i;
3835         static struct {
3836                 u32   offset;
3837                 u32   len;
3838         } mem_tbl[] = {
3839                 { 0x60000,  0x4000 },
3840                 { 0xa0000,  0x3000 },
3841                 { 0xe0000,  0x4000 },
3842                 { 0x120000, 0x4000 },
3843                 { 0x1a0000, 0x4000 },
3844                 { 0x160000, 0x4000 },
3845                 { 0xffffffff, 0    },
3846         };
3847
3848         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3849                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3850                         mem_tbl[i].len)) != 0) {
3851                         return ret;
3852                 }
3853         }
3854         
3855         return ret;
3856 }
3857
3858 static int
3859 bnx2_test_loopback(struct bnx2 *bp)
3860 {
3861         unsigned int pkt_size, num_pkts, i;
3862         struct sk_buff *skb, *rx_skb;
3863         unsigned char *packet;
3864         u16 rx_start_idx, rx_idx, send_idx;
3865         u32 send_bseq, val;
3866         dma_addr_t map;
3867         struct tx_bd *txbd;
3868         struct sw_bd *rx_buf;
3869         struct l2_fhdr *rx_hdr;
3870         int ret = -ENODEV;
3871
3872         if (!netif_running(bp->dev))
3873                 return -ENODEV;
3874
3875         bp->loopback = MAC_LOOPBACK;
3876         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_DIAG);
3877         bnx2_set_mac_loopback(bp);
3878
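              /* Build a 1514-byte test frame addressed to our own MAC address
               * with a zeroed source/type field and an incrementing byte
               * pattern, send it through the internal MAC loopback, and verify
               * that it comes back intact. */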
3879         pkt_size = 1514;
3880         skb = dev_alloc_skb(pkt_size);
             if (!skb)
                     goto loopback_test_done;
3881         packet = skb_put(skb, pkt_size);
3882         memcpy(packet, bp->mac_addr, 6);
3883         memset(packet + 6, 0x0, 8);
3884         for (i = 14; i < pkt_size; i++)
3885                 packet[i] = (unsigned char) (i & 0xff);
3886
3887         map = pci_map_single(bp->pdev, skb->data, pkt_size,
3888                 PCI_DMA_TODEVICE);
3889
3890         val = REG_RD(bp, BNX2_HC_COMMAND);
3891         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3892         REG_RD(bp, BNX2_HC_COMMAND);
3893
3894         udelay(5);
3895         rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3896
3897         send_idx = 0;
3898         send_bseq = 0;
3899         num_pkts = 0;
3900
3901         txbd = &bp->tx_desc_ring[send_idx];
3902
3903         txbd->tx_bd_haddr_hi = (u64) map >> 32;
3904         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3905         txbd->tx_bd_mss_nbytes = pkt_size;
3906         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3907
3908         num_pkts++;
3909         send_idx = NEXT_TX_BD(send_idx);
3910
3911         send_bseq += pkt_size;
3912
3913         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, send_idx);
3914         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, send_bseq);
3915
3916
3917         udelay(100);
3918
3919         val = REG_RD(bp, BNX2_HC_COMMAND);
3920         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3921         REG_RD(bp, BNX2_HC_COMMAND);
3922
3923         udelay(5);
3924
3925         pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3926         dev_kfree_skb_irq(skb);
3927
3928         if (bp->status_blk->status_tx_quick_consumer_index0 != send_idx) {
3929                 goto loopback_test_done;
3930         }
3931
3932         rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3933         if (rx_idx != rx_start_idx + num_pkts) {
3934                 goto loopback_test_done;
3935         }
3936
3937         rx_buf = &bp->rx_buf_ring[rx_start_idx];
3938         rx_skb = rx_buf->skb;
3939
3940         rx_hdr = (struct l2_fhdr *) rx_skb->data;
3941         skb_reserve(rx_skb, bp->rx_offset);
3942
3943         pci_dma_sync_single_for_cpu(bp->pdev,
3944                 pci_unmap_addr(rx_buf, mapping),
3945                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3946
3947         if (rx_hdr->l2_fhdr_errors &
3948                 (L2_FHDR_ERRORS_BAD_CRC |
3949                 L2_FHDR_ERRORS_PHY_DECODE |
3950                 L2_FHDR_ERRORS_ALIGNMENT |
3951                 L2_FHDR_ERRORS_TOO_SHORT |
3952                 L2_FHDR_ERRORS_GIANT_FRAME)) {
3953
3954                 goto loopback_test_done;
3955         }
3956
3957         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3958                 goto loopback_test_done;
3959         }
3960
3961         for (i = 14; i < pkt_size; i++) {
3962                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3963                         goto loopback_test_done;
3964                 }
3965         }
3966
3967         ret = 0;
3968
3969 loopback_test_done:
3970         bp->loopback = 0;
3971         return ret;
3972 }
3973
3974 #define NVRAM_SIZE 0x200
3975 #define CRC32_RESIDUAL 0xdebb20e3
3976
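     /* Sanity-check the NVRAM contents: the first word must contain the
      * 0x669955aa magic value, and each 0x100-byte block carries its own CRC32,
      * so running ether_crc_le() over the whole block must yield the fixed
      * CRC-32 residual 0xdebb20e3 if the checksum is intact. */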
3977 static int
3978 bnx2_test_nvram(struct bnx2 *bp)
3979 {
3980         u32 buf[NVRAM_SIZE / 4];
3981         u8 *data = (u8 *) buf;
3982         int rc = 0;
3983         u32 magic, csum;
3984
3985         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3986                 goto test_nvram_done;
3987
3988         magic = be32_to_cpu(buf[0]);
3989         if (magic != 0x669955aa) {
3990                 rc = -ENODEV;
3991                 goto test_nvram_done;
3992         }
3993
3994         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3995                 goto test_nvram_done;
3996
3997         csum = ether_crc_le(0x100, data);
3998         if (csum != CRC32_RESIDUAL) {
3999                 rc = -ENODEV;
4000                 goto test_nvram_done;
4001         }
4002
4003         csum = ether_crc_le(0x100, data + 0x100);
4004         if (csum != CRC32_RESIDUAL) {
4005                 rc = -ENODEV;
4006         }
4007
4008 test_nvram_done:
4009         return rc;
4010 }
4011
4012 static int
4013 bnx2_test_link(struct bnx2 *bp)
4014 {
4015         u32 bmsr;
4016
4017         spin_lock_bh(&bp->phy_lock);
4018         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4019         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4020         spin_unlock_bh(&bp->phy_lock);
4021                 
4022         if (bmsr & BMSR_LSTATUS) {
4023                 return 0;
4024         }
4025         return -ENODEV;
4026 }
4027
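     /* Verify that the device can actually deliver an interrupt: force a
      * "coalesce now" host coalescing command and poll the status block index
      * in BNX2_PCICFG_INT_ACK_CMD for up to ~100 ms.  bnx2_open() uses this to
      * fall back from MSI to INTx when MSI delivery is broken. */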
4028 static int
4029 bnx2_test_intr(struct bnx2 *bp)
4030 {
4031         int i;
4032         u32 val;
4033         u16 status_idx;
4034
4035         if (!netif_running(bp->dev))
4036                 return -ENODEV;
4037
4038         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4039
4040         /* This register is not touched during run-time. */
4041         val = REG_RD(bp, BNX2_HC_COMMAND);
4042         REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
4043         REG_RD(bp, BNX2_HC_COMMAND);
4044
4045         for (i = 0; i < 10; i++) {
4046                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4047                         status_idx) {
4048
4049                         break;
4050                 }
4051
4052                 msleep_interruptible(10);
4053         }
4054         if (i < 10)
4055                 return 0;
4056
4057         return -ENODEV;
4058 }
4059
4060 static void
4061 bnx2_timer(unsigned long data)
4062 {
4063         struct bnx2 *bp = (struct bnx2 *) data;
4064         u32 msg;
4065
4066         if (!netif_running(bp->dev))
4067                 return;
4068
4069         if (atomic_read(&bp->intr_sem) != 0)
4070                 goto bnx2_restart_timer;
4071
4072         msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4073         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4074
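              /* 5706 SerDes parallel detect: while autonegotiation is enabled
               * but the link is down, poll the PHY; if a signal is detected
               * (reg 0x1c) without incoming config words (reg 0x15 bit 5),
               * force 1000 Mbps full duplex.  Once config words reappear on a
               * forced link, re-enable autonegotiation. */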
4075         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4076             (CHIP_NUM(bp) == CHIP_NUM_5706)) {
4077
4078                 spin_lock(&bp->phy_lock);
4079                 if (bp->serdes_an_pending) {
4080                         bp->serdes_an_pending--;
4081                 }
4082                 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4083                         u32 bmcr;
4084
4085                         bp->current_interval = bp->timer_interval;
4086
4087                         bnx2_read_phy(bp, MII_BMCR, &bmcr);
4088
4089                         if (bmcr & BMCR_ANENABLE) {
4090                                 u32 phy1, phy2;
4091
4092                                 bnx2_write_phy(bp, 0x1c, 0x7c00);
4093                                 bnx2_read_phy(bp, 0x1c, &phy1);
4094
4095                                 bnx2_write_phy(bp, 0x17, 0x0f01);
4096                                 bnx2_read_phy(bp, 0x15, &phy2);
4097                                 bnx2_write_phy(bp, 0x17, 0x0f01);
4098                                 bnx2_read_phy(bp, 0x15, &phy2);
4099
4100                                 if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
4101                                         !(phy2 & 0x20)) {       /* no CONFIG */
4102
4103                                         bmcr &= ~BMCR_ANENABLE;
4104                                         bmcr |= BMCR_SPEED1000 |
4105                                                 BMCR_FULLDPLX;
4106                                         bnx2_write_phy(bp, MII_BMCR, bmcr);
4107                                         bp->phy_flags |=
4108                                                 PHY_PARALLEL_DETECT_FLAG;
4109                                 }
4110                         }
4111                 }
4112                 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4113                         (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4114                         u32 phy2;
4115
4116                         bnx2_write_phy(bp, 0x17, 0x0f01);
4117                         bnx2_read_phy(bp, 0x15, &phy2);
4118                         if (phy2 & 0x20) {
4119                                 u32 bmcr;
4120
4121                                 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4122                                 bmcr |= BMCR_ANENABLE;
4123                                 bnx2_write_phy(bp, MII_BMCR, bmcr);
4124
4125                                 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4126
4127                         }
4128                 }
4129                 else
4130                         bp->current_interval = bp->timer_interval;
4131
4132                 spin_unlock(&bp->phy_lock);
4133         }
4134
4135 bnx2_restart_timer:
4136         mod_timer(&bp->timer, jiffies + bp->current_interval);
4137 }
4138
4139 /* Called with rtnl_lock */
4140 static int
4141 bnx2_open(struct net_device *dev)
4142 {
4143         struct bnx2 *bp = dev->priv;
4144         int rc;
4145
4146         bnx2_set_power_state(bp, PCI_D0);
4147         bnx2_disable_int(bp);
4148
4149         rc = bnx2_alloc_mem(bp);
4150         if (rc)
4151                 return rc;
4152
4153         if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4154                 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4155                 !disable_msi) {
4156
4157                 if (pci_enable_msi(bp->pdev) == 0) {
4158                         bp->flags |= USING_MSI_FLAG;
4159                         rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4160                                         dev);
4161                 }
4162                 else {
4163                         rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4164                                         SA_SHIRQ, dev->name, dev);
4165                 }
4166         }
4167         else {
4168                 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4169                                 dev->name, dev);
4170         }
4171         if (rc) {
4172                 bnx2_free_mem(bp);
4173                 return rc;
4174         }
4175
4176         rc = bnx2_init_nic(bp);
4177
4178         if (rc) {
4179                 free_irq(bp->pdev->irq, dev);
4180                 if (bp->flags & USING_MSI_FLAG) {
4181                         pci_disable_msi(bp->pdev);
4182                         bp->flags &= ~USING_MSI_FLAG;
4183                 }
4184                 bnx2_free_skbs(bp);
4185                 bnx2_free_mem(bp);
4186                 return rc;
4187         }
4188         
4189         mod_timer(&bp->timer, jiffies + bp->current_interval);
4190
4191         atomic_set(&bp->intr_sem, 0);
4192
4193         bnx2_enable_int(bp);
4194
4195         if (bp->flags & USING_MSI_FLAG) {
4196                 /* Test MSI to make sure it is working.
4197                  * If the MSI test fails, go back to INTx mode.
4198                  */
4199                 if (bnx2_test_intr(bp) != 0) {
4200                         printk(KERN_WARNING PFX "%s: No interrupt was generated"
4201                                " using MSI, switching to INTx mode. Please"
4202                                " report this failure to the PCI maintainer"
4203                                " and include system chipset information.\n",
4204                                bp->dev->name);
4205
4206                         bnx2_disable_int(bp);
4207                         free_irq(bp->pdev->irq, dev);
4208                         pci_disable_msi(bp->pdev);
4209                         bp->flags &= ~USING_MSI_FLAG;
4210
4211                         rc = bnx2_init_nic(bp);
4212
4213                         if (!rc) {
4214                                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4215                                         SA_SHIRQ, dev->name, dev);
4216                         }
4217                         if (rc) {
4218                                 bnx2_free_skbs(bp);
4219                                 bnx2_free_mem(bp);
4220                                 del_timer_sync(&bp->timer);
4221                                 return rc;
4222                         }
4223                         bnx2_enable_int(bp);
4224                 }
4225         }
4226         if (bp->flags & USING_MSI_FLAG) {
4227                 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4228         }
4229
4230         netif_start_queue(dev);
4231
4232         return 0;
4233 }
4234
4235 static void
4236 bnx2_reset_task(void *data)
4237 {
4238         struct bnx2 *bp = data;
4239
4240         if (!netif_running(bp->dev))
4241                 return;
4242
4243         bp->in_reset_task = 1;
4244         bnx2_netif_stop(bp);
4245
4246         bnx2_init_nic(bp);
4247
4248         atomic_set(&bp->intr_sem, 1);
4249         bnx2_netif_start(bp);
4250         bp->in_reset_task = 0;
4251 }
4252
4253 static void
4254 bnx2_tx_timeout(struct net_device *dev)
4255 {
4256         struct bnx2 *bp = dev->priv;
4257
4258         /* This allows the netif to be shut down gracefully before resetting */
4259         schedule_work(&bp->reset_task);
4260 }
4261
4262 #ifdef BCM_VLAN
4263 /* Called with rtnl_lock */
4264 static void
4265 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4266 {
4267         struct bnx2 *bp = dev->priv;
4268
4269         bnx2_netif_stop(bp);
4270
4271         bp->vlgrp = vlgrp;
4272         bnx2_set_rx_mode(dev);
4273
4274         bnx2_netif_start(bp);
4275 }
4276
4277 /* Called with rtnl_lock */
4278 static void
4279 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4280 {
4281         struct bnx2 *bp = dev->priv;
4282
4283         bnx2_netif_stop(bp);
4284
4285         if (bp->vlgrp)
4286                 bp->vlgrp->vlan_devices[vid] = NULL;
4287         bnx2_set_rx_mode(dev);
4288
4289         bnx2_netif_start(bp);
4290 }
4291 #endif
4292
4293 /* Called with dev->xmit_lock.
4294  * hard_start_xmit is pseudo-lockless - a lock is only required when
4295  * the tx queue is full. This way, we get the benefit of lockless
4296  * operations most of the time without the complexities to handle
4297  * netif_stop_queue/wake_queue race conditions.
4298  */
4299 static int
4300 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4301 {
4302         struct bnx2 *bp = dev->priv;
4303         dma_addr_t mapping;
4304         struct tx_bd *txbd;
4305         struct sw_bd *tx_buf;
4306         u32 len, vlan_tag_flags, last_frag, mss;
4307         u16 prod, ring_prod;
4308         int i;
4309
4310         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4311                 netif_stop_queue(dev);
4312                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4313                         dev->name);
4314
4315                 return NETDEV_TX_BUSY;
4316         }
4317         len = skb_headlen(skb);
4318         prod = bp->tx_prod;
4319         ring_prod = TX_RING_IDX(prod);
4320
4321         vlan_tag_flags = 0;
4322         if (skb->ip_summed == CHECKSUM_HW) {
4323                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4324         }
4325
4326         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4327                 vlan_tag_flags |=
4328                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4329         }
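              /* For TSO frames, pre-seed the IP total length with one segment's
               * worth of payload, clear the IP checksum, precompute the TCP
               * pseudo-header checksum with a zero length, and encode the
               * combined IP option + TCP option length (in 32-bit words) into
               * the BD flags so the chip can rebuild the headers per segment. */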
4330 #ifdef BCM_TSO 
4331         if ((mss = skb_shinfo(skb)->tso_size) &&
4332                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4333                 u32 tcp_opt_len, ip_tcp_len;
4334
4335                 if (skb_header_cloned(skb) &&
4336                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4337                         dev_kfree_skb(skb);
4338                         return NETDEV_TX_OK;
4339                 }
4340
4342                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4343
4344                 tcp_opt_len = 0;
4345                 if (skb->h.th->doff > 5) {
4346                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4347                 }
4348                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4349
4350                 skb->nh.iph->check = 0;
4351                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4352                 skb->h.th->check =
4353                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4354                                             skb->nh.iph->daddr,
4355                                             0, IPPROTO_TCP, 0);
4356
4357                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4358                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4359                                 (tcp_opt_len >> 2)) << 8;
4360                 }
4361         }
4362         else
4363 #endif
4364         {
4365                 mss = 0;
4366         }
4367
4368         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4369         
4370         tx_buf = &bp->tx_buf_ring[ring_prod];
4371         tx_buf->skb = skb;
4372         pci_unmap_addr_set(tx_buf, mapping, mapping);
4373
4374         txbd = &bp->tx_desc_ring[ring_prod];
4375
4376         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4377         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4378         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4379         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4380
4381         last_frag = skb_shinfo(skb)->nr_frags;
4382
4383         for (i = 0; i < last_frag; i++) {
4384                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4385
4386                 prod = NEXT_TX_BD(prod);
4387                 ring_prod = TX_RING_IDX(prod);
4388                 txbd = &bp->tx_desc_ring[ring_prod];
4389
4390                 len = frag->size;
4391                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4392                         len, PCI_DMA_TODEVICE);
4393                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4394                                 mapping, mapping);
4395
4396                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4397                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4398                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4399                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4400
4401         }
4402         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4403
4404         prod = NEXT_TX_BD(prod);
4405         bp->tx_prod_bseq += skb->len;
4406
4407         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4408         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4409
4410         mmiowb();
4411
4412         bp->tx_prod = prod;
4413         dev->trans_start = jiffies;
4414
4415         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4416                 spin_lock(&bp->tx_lock);
4417                 netif_stop_queue(dev);
4418                 
4419                 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4420                         netif_wake_queue(dev);
4421                 spin_unlock(&bp->tx_lock);
4422         }
4423
4424         return NETDEV_TX_OK;
4425 }
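
/*
 * Illustrative note (not part of the original source): for an skb with
 * three page fragments the transmit path above consumes four buffer
 * descriptors -- one for the linear header area and one per fragment.
 * The value written to BNX2_L2CTX_TX_HOST_BIDX is the producer index one
 * past the last descriptor used, and BNX2_L2CTX_TX_HOST_BSEQ receives
 * tx_prod_bseq, a running total of bytes queued for transmit.
 */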
4426
4427 /* Called with rtnl_lock */
4428 static int
4429 bnx2_close(struct net_device *dev)
4430 {
4431         struct bnx2 *bp = dev->priv;
4432         u32 reset_code;
4433
4434         /* Calling flush_scheduled_work() may deadlock because
4435          * linkwatch_event() may be on the workqueue and it will try to get
4436          * the rtnl_lock which we are holding.
4437          */
4438         while (bp->in_reset_task)
4439                 msleep(1);
4440
4441         bnx2_netif_stop(bp);
4442         del_timer_sync(&bp->timer);
4443         if (bp->wol)
4444                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4445         else
4446                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4447         bnx2_reset_chip(bp, reset_code);
4448         free_irq(bp->pdev->irq, dev);
4449         if (bp->flags & USING_MSI_FLAG) {
4450                 pci_disable_msi(bp->pdev);
4451                 bp->flags &= ~USING_MSI_FLAG;
4452         }
4453         bnx2_free_skbs(bp);
4454         bnx2_free_mem(bp);
4455         bp->link_up = 0;
4456         netif_carrier_off(bp->dev);
4457         bnx2_set_power_state(bp, PCI_D3hot);
4458         return 0;
4459 }
4460
4461 #define GET_NET_STATS64(ctr)                                    \
4462         (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
4463         (unsigned long) (ctr##_lo)
4464
4465 #define GET_NET_STATS32(ctr)            \
4466         (ctr##_lo)
4467
4468 #if (BITS_PER_LONG == 64)
4469 #define GET_NET_STATS   GET_NET_STATS64
4470 #else
4471 #define GET_NET_STATS   GET_NET_STATS32
4472 #endif
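
/*
 * Worked example of the macros above: on a 64-bit host,
 * GET_NET_STATS(stats_blk->stat_IfHCInOctets) expands to
 *
 *   (unsigned long) ((unsigned long) (stats_blk->stat_IfHCInOctets_hi) << 32) +
 *   (unsigned long) (stats_blk->stat_IfHCInOctets_lo)
 *
 * combining the two 32-bit halves of the hardware counter.  On a 32-bit
 * host only the _lo half is reported, so large counters can wrap.
 */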
4473
4474 static struct net_device_stats *
4475 bnx2_get_stats(struct net_device *dev)
4476 {
4477         struct bnx2 *bp = dev->priv;
4478         struct statistics_block *stats_blk = bp->stats_blk;
4479         struct net_device_stats *net_stats = &bp->net_stats;
4480
4481         if (bp->stats_blk == NULL) {
4482                 return net_stats;
4483         }
4484         net_stats->rx_packets =
4485                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4486                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4487                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4488
4489         net_stats->tx_packets =
4490                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4491                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4492                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4493
4494         net_stats->rx_bytes =
4495                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4496
4497         net_stats->tx_bytes =
4498                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4499
4500         net_stats->multicast = 
4501                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4502
4503         net_stats->collisions = 
4504                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4505
4506         net_stats->rx_length_errors = 
4507                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4508                 stats_blk->stat_EtherStatsOverrsizePkts);
4509
4510         net_stats->rx_over_errors = 
4511                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4512
4513         net_stats->rx_frame_errors = 
4514                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4515
4516         net_stats->rx_crc_errors = 
4517                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4518
4519         net_stats->rx_errors = net_stats->rx_length_errors +
4520                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4521                 net_stats->rx_crc_errors;
4522
4523         net_stats->tx_aborted_errors =
4524                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4525                 stats_blk->stat_Dot3StatsLateCollisions);
4526
4527         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4528             (CHIP_ID(bp) == CHIP_ID_5708_A0))
4529                 net_stats->tx_carrier_errors = 0;
4530         else {
4531                 net_stats->tx_carrier_errors =
4532                         (unsigned long)
4533                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
4534         }
4535
4536         net_stats->tx_errors =
4537                 (unsigned long) 
4538                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4539                 +
4540                 net_stats->tx_aborted_errors +
4541                 net_stats->tx_carrier_errors;
4542
4543         return net_stats;
4544 }
4545
4546 /* All ethtool functions called with rtnl_lock */
4547
4548 static int
4549 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4550 {
4551         struct bnx2 *bp = dev->priv;
4552
4553         cmd->supported = SUPPORTED_Autoneg;
4554         if (bp->phy_flags & PHY_SERDES_FLAG) {
4555                 cmd->supported |= SUPPORTED_1000baseT_Full |
4556                         SUPPORTED_FIBRE;
4557
4558                 cmd->port = PORT_FIBRE;
4559         }
4560         else {
4561                 cmd->supported |= SUPPORTED_10baseT_Half |
4562                         SUPPORTED_10baseT_Full |
4563                         SUPPORTED_100baseT_Half |
4564                         SUPPORTED_100baseT_Full |
4565                         SUPPORTED_1000baseT_Full |
4566                         SUPPORTED_TP;
4567
4568                 cmd->port = PORT_TP;
4569         }
4570
4571         cmd->advertising = bp->advertising;
4572
4573         if (bp->autoneg & AUTONEG_SPEED) {
4574                 cmd->autoneg = AUTONEG_ENABLE;
4575         }
4576         else {
4577                 cmd->autoneg = AUTONEG_DISABLE;
4578         }
4579
4580         if (netif_carrier_ok(dev)) {
4581                 cmd->speed = bp->line_speed;
4582                 cmd->duplex = bp->duplex;
4583         }
4584         else {
4585                 cmd->speed = -1;
4586                 cmd->duplex = -1;
4587         }
4588
4589         cmd->transceiver = XCVR_INTERNAL;
4590         cmd->phy_address = bp->phy_addr;
4591
4592         return 0;
4593 }
4594   
4595 static int
4596 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4597 {
4598         struct bnx2 *bp = dev->priv;
4599         u8 autoneg = bp->autoneg;
4600         u8 req_duplex = bp->req_duplex;
4601         u16 req_line_speed = bp->req_line_speed;
4602         u32 advertising = bp->advertising;
4603
4604         if (cmd->autoneg == AUTONEG_ENABLE) {
4605                 autoneg |= AUTONEG_SPEED;
4606
4607                 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 
4608
4609                 /* allow advertising 1 speed */
4610                 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4611                         (cmd->advertising == ADVERTISED_10baseT_Full) ||
4612                         (cmd->advertising == ADVERTISED_100baseT_Half) ||
4613                         (cmd->advertising == ADVERTISED_100baseT_Full)) {
4614
4615                         if (bp->phy_flags & PHY_SERDES_FLAG)
4616                                 return -EINVAL;
4617
4618                         advertising = cmd->advertising;
4619
4620                 }
4621                 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4622                         advertising = cmd->advertising;
4623                 }
4624                 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4625                         return -EINVAL;
4626                 }
4627                 else {
4628                         if (bp->phy_flags & PHY_SERDES_FLAG) {
4629                                 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4630                         }
4631                         else {
4632                                 advertising = ETHTOOL_ALL_COPPER_SPEED;
4633                         }
4634                 }
4635                 advertising |= ADVERTISED_Autoneg;
4636         }
4637         else {
4638                 if (bp->phy_flags & PHY_SERDES_FLAG) {
4639                         if ((cmd->speed != SPEED_1000) ||
4640                                 (cmd->duplex != DUPLEX_FULL)) {
4641                                 return -EINVAL;
4642                         }
4643                 }
4644                 else if (cmd->speed == SPEED_1000) {
4645                         return -EINVAL;
4646                 }
4647                 autoneg &= ~AUTONEG_SPEED;
4648                 req_line_speed = cmd->speed;
4649                 req_duplex = cmd->duplex;
4650                 advertising = 0;
4651         }
4652
4653         bp->autoneg = autoneg;
4654         bp->advertising = advertising;
4655         bp->req_line_speed = req_line_speed;
4656         bp->req_duplex = req_duplex;
4657
4658         spin_lock_bh(&bp->phy_lock);
4659
4660         bnx2_setup_phy(bp);
4661
4662         spin_unlock_bh(&bp->phy_lock);
4663
4664         return 0;
4665 }
4666
4667 static void
4668 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4669 {
4670         struct bnx2 *bp = dev->priv;
4671
4672         strcpy(info->driver, DRV_MODULE_NAME);
4673         strcpy(info->version, DRV_MODULE_VERSION);
4674         strcpy(info->bus_info, pci_name(bp->pdev));
4675         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4676         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4677         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4678         info->fw_version[6] = (bp->fw_ver & 0xff) + '0';
4679         info->fw_version[1] = info->fw_version[3] = info->fw_version[5] = '.';
4680         info->fw_version[7] = 0;
4681 }
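
/*
 * Worked example of the formatting above: a bootcode revision of
 * 0x01020304 in bp->fw_ver is reported to ethtool as "1.2.3.4".  Each
 * byte is converted with a single "+ '0'", so the string is only
 * well-formed while every version component is in the range 0-9.
 */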
4682
4683 static void
4684 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4685 {
4686         struct bnx2 *bp = dev->priv;
4687
4688         if (bp->flags & NO_WOL_FLAG) {
4689                 wol->supported = 0;
4690                 wol->wolopts = 0;
4691         }
4692         else {
4693                 wol->supported = WAKE_MAGIC;
4694                 if (bp->wol)
4695                         wol->wolopts = WAKE_MAGIC;
4696                 else
4697                         wol->wolopts = 0;
4698         }
4699         memset(&wol->sopass, 0, sizeof(wol->sopass));
4700 }
4701
4702 static int
4703 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4704 {
4705         struct bnx2 *bp = dev->priv;
4706
4707         if (wol->wolopts & ~WAKE_MAGIC)
4708                 return -EINVAL;
4709
4710         if (wol->wolopts & WAKE_MAGIC) {
4711                 if (bp->flags & NO_WOL_FLAG)
4712                         return -EINVAL;
4713
4714                 bp->wol = 1;
4715         }
4716         else {
4717                 bp->wol = 0;
4718         }
4719         return 0;
4720 }
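
/*
 * Usage sketch (standard ethtool syntax, not specific to this driver):
 * "ethtool -s ethX wol g" requests magic-packet wake-up via bnx2_set_wol()
 * above, and "ethtool -s ethX wol d" disables it.  Any other wake option,
 * or magic-packet wake-up on a NO_WOL_FLAG board, is rejected with -EINVAL.
 */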
4721
4722 static int
4723 bnx2_nway_reset(struct net_device *dev)
4724 {
4725         struct bnx2 *bp = dev->priv;
4726         u32 bmcr;
4727
4728         if (!(bp->autoneg & AUTONEG_SPEED)) {
4729                 return -EINVAL;
4730         }
4731
4732         spin_lock_bh(&bp->phy_lock);
4733
4734         /* Force a link down visible on the other side */
4735         if (bp->phy_flags & PHY_SERDES_FLAG) {
4736                 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4737                 spin_unlock_bh(&bp->phy_lock);
4738
4739                 msleep(20);
4740
4741                 spin_lock_bh(&bp->phy_lock);
4742                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4743                         bp->current_interval = SERDES_AN_TIMEOUT;
4744                         bp->serdes_an_pending = 1;
4745                         mod_timer(&bp->timer, jiffies + bp->current_interval);
4746                 }
4747         }
4748
4749         bnx2_read_phy(bp, MII_BMCR, &bmcr);
4750         bmcr &= ~BMCR_LOOPBACK;
4751         bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4752
4753         spin_unlock_bh(&bp->phy_lock);
4754
4755         return 0;
4756 }
4757
4758 static int
4759 bnx2_get_eeprom_len(struct net_device *dev)
4760 {
4761         struct bnx2 *bp = dev->priv;
4762
4763         if (bp->flash_info == 0)
4764                 return 0;
4765
4766         return (int) bp->flash_info->total_size;
4767 }
4768
4769 static int
4770 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4771                 u8 *eebuf)
4772 {
4773         struct bnx2 *bp = dev->priv;
4774         int rc;
4775
4776         if (eeprom->offset > bp->flash_info->total_size)
4777                 return -EINVAL;
4778
4779         if ((eeprom->offset + eeprom->len) > bp->flash_info->total_size)
4780                 eeprom->len = bp->flash_info->total_size - eeprom->offset;
4781
4782         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4783
4784         return rc;
4785 }
4786
4787 static int
4788 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4789                 u8 *eebuf)
4790 {
4791         struct bnx2 *bp = dev->priv;
4792         int rc;
4793
4794         if (eeprom->offset > bp->flash_info->total_size)
4795                 return -EINVAL;
4796
4797         if ((eeprom->offset + eeprom->len) > bp->flash_info->total_size)
4798                 eeprom->len = bp->flash_info->total_size - eeprom->offset;
4799
4800         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4801
4802         return rc;
4803 }
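
/*
 * Usage sketch (standard ethtool syntax): "ethtool -e ethX offset 0
 * length 256" reads NVRAM through bnx2_get_eeprom() above and "ethtool -E"
 * writes it.  Requests starting beyond the end of the flash are rejected;
 * requests that merely run past the end are truncated to
 * flash_info->total_size.
 */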
4804
4805 static int
4806 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4807 {
4808         struct bnx2 *bp = dev->priv;
4809
4810         memset(coal, 0, sizeof(struct ethtool_coalesce));
4811
4812         coal->rx_coalesce_usecs = bp->rx_ticks;
4813         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4814         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4815         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4816
4817         coal->tx_coalesce_usecs = bp->tx_ticks;
4818         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4819         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4820         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4821
4822         coal->stats_block_coalesce_usecs = bp->stats_ticks;
4823
4824         return 0;
4825 }
4826
4827 static int
4828 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4829 {
4830         struct bnx2 *bp = dev->priv;
4831
4832         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4833         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4834
4835         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; 
4836         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4837
4838         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4839         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4840
4841         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4842         if (bp->rx_quick_cons_trip_int > 0xff)
4843                 bp->rx_quick_cons_trip_int = 0xff;
4844
4845         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4846         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4847
4848         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4849         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4850
4851         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4852         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4853
4854         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4855         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4856                 0xff;
4857
4858         bp->stats_ticks = coal->stats_block_coalesce_usecs;
4859         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4860         bp->stats_ticks &= 0xffff00;
4861
4862         if (netif_running(bp->dev)) {
4863                 bnx2_netif_stop(bp);
4864                 bnx2_init_nic(bp);
4865                 bnx2_netif_start(bp);
4866         }
4867
4868         return 0;
4869 }
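
/*
 * Worked example for the clamping above (standard ethtool syntax):
 * "ethtool -C ethX rx-usecs 18 rx-frames 6 tx-usecs 80 tx-frames 20"
 * maps onto rx_ticks/rx_quick_cons_trip and tx_ticks/tx_quick_cons_trip.
 * Tick values are clamped to 0x3ff, frame counts to 0xff and the stats
 * interval to 0xffff00; out-of-range requests are clamped rather than
 * rejected, and a running interface is re-initialized to apply them.
 */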
4870
4871 static void
4872 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4873 {
4874         struct bnx2 *bp = dev->priv;
4875
4876         ering->rx_max_pending = MAX_RX_DESC_CNT;
4877         ering->rx_mini_max_pending = 0;
4878         ering->rx_jumbo_max_pending = 0;
4879
4880         ering->rx_pending = bp->rx_ring_size;
4881         ering->rx_mini_pending = 0;
4882         ering->rx_jumbo_pending = 0;
4883
4884         ering->tx_max_pending = MAX_TX_DESC_CNT;
4885         ering->tx_pending = bp->tx_ring_size;
4886 }
4887
4888 static int
4889 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4890 {
4891         struct bnx2 *bp = dev->priv;
4892
4893         if ((ering->rx_pending > MAX_RX_DESC_CNT) ||
4894                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4895                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4896
4897                 return -EINVAL;
4898         }
4899         bp->rx_ring_size = ering->rx_pending;
4900         bp->tx_ring_size = ering->tx_pending;
4901
4902         if (netif_running(bp->dev)) {
4903                 bnx2_netif_stop(bp);
4904                 bnx2_init_nic(bp);
4905                 bnx2_netif_start(bp);
4906         }
4907
4908         return 0;
4909 }
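
/*
 * Usage sketch (standard ethtool syntax): "ethtool -G ethX rx 255 tx 255"
 * resizes the rings through bnx2_set_ringparam() above.  The tx ring must
 * be larger than MAX_SKB_FRAGS and neither ring may exceed
 * MAX_RX_DESC_CNT / MAX_TX_DESC_CNT; a running interface is restarted so
 * the new sizes take effect.
 */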
4910
4911 static void
4912 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4913 {
4914         struct bnx2 *bp = dev->priv;
4915
4916         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4917         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4918         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4919 }
4920
4921 static int
4922 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4923 {
4924         struct bnx2 *bp = dev->priv;
4925
4926         bp->req_flow_ctrl = 0;
4927         if (epause->rx_pause)
4928                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4929         if (epause->tx_pause)
4930                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4931
4932         if (epause->autoneg) {
4933                 bp->autoneg |= AUTONEG_FLOW_CTRL;
4934         }
4935         else {
4936                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4937         }
4938
4939         spin_lock_bh(&bp->phy_lock);
4940
4941         bnx2_setup_phy(bp);
4942
4943         spin_unlock_bh(&bp->phy_lock);
4944
4945         return 0;
4946 }
4947
4948 static u32
4949 bnx2_get_rx_csum(struct net_device *dev)
4950 {
4951         struct bnx2 *bp = dev->priv;
4952
4953         return bp->rx_csum;
4954 }
4955
4956 static int
4957 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4958 {
4959         struct bnx2 *bp = dev->priv;
4960
4961         bp->rx_csum = data;
4962         return 0;
4963 }
4964
4965 #define BNX2_NUM_STATS 45
4966
4967 static struct {
4968         char string[ETH_GSTRING_LEN];
4969 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4970         { "rx_bytes" },
4971         { "rx_error_bytes" },
4972         { "tx_bytes" },
4973         { "tx_error_bytes" },
4974         { "rx_ucast_packets" },
4975         { "rx_mcast_packets" },
4976         { "rx_bcast_packets" },
4977         { "tx_ucast_packets" },
4978         { "tx_mcast_packets" },
4979         { "tx_bcast_packets" },
4980         { "tx_mac_errors" },
4981         { "tx_carrier_errors" },
4982         { "rx_crc_errors" },
4983         { "rx_align_errors" },
4984         { "tx_single_collisions" },
4985         { "tx_multi_collisions" },
4986         { "tx_deferred" },
4987         { "tx_excess_collisions" },
4988         { "tx_late_collisions" },
4989         { "tx_total_collisions" },
4990         { "rx_fragments" },
4991         { "rx_jabbers" },
4992         { "rx_undersize_packets" },
4993         { "rx_oversize_packets" },
4994         { "rx_64_byte_packets" },
4995         { "rx_65_to_127_byte_packets" },
4996         { "rx_128_to_255_byte_packets" },
4997         { "rx_256_to_511_byte_packets" },
4998         { "rx_512_to_1023_byte_packets" },
4999         { "rx_1024_to_1522_byte_packets" },
5000         { "rx_1523_to_9022_byte_packets" },
5001         { "tx_64_byte_packets" },
5002         { "tx_65_to_127_byte_packets" },
5003         { "tx_128_to_255_byte_packets" },
5004         { "tx_256_to_511_byte_packets" },
5005         { "tx_512_to_1023_byte_packets" },
5006         { "tx_1024_to_1522_byte_packets" },
5007         { "tx_1523_to_9022_byte_packets" },
5008         { "rx_xon_frames" },
5009         { "rx_xoff_frames" },
5010         { "tx_xon_frames" },
5011         { "tx_xoff_frames" },
5012         { "rx_mac_ctrl_frames" },
5013         { "rx_filtered_packets" },
5014         { "rx_discards" },
5015 };
5016
5017 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5018
5019 static unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5020     STATS_OFFSET32(stat_IfHCInOctets_hi),
5021     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5022     STATS_OFFSET32(stat_IfHCOutOctets_hi),
5023     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5024     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5025     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5026     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5027     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5028     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5029     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5030     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5031     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),                 
5032     STATS_OFFSET32(stat_Dot3StatsFCSErrors),                          
5033     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),                    
5034     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),              
5035     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),            
5036     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),              
5037     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),                
5038     STATS_OFFSET32(stat_Dot3StatsLateCollisions),                     
5039     STATS_OFFSET32(stat_EtherStatsCollisions),                        
5040     STATS_OFFSET32(stat_EtherStatsFragments),                         
5041     STATS_OFFSET32(stat_EtherStatsJabbers),                           
5042     STATS_OFFSET32(stat_EtherStatsUndersizePkts),                     
5043     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),                     
5044     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),                    
5045     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),         
5046     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),        
5047     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),        
5048     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),       
5049     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),      
5050     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),      
5051     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),                    
5052     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),         
5053     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),        
5054     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),        
5055     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),       
5056     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),      
5057     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),      
5058     STATS_OFFSET32(stat_XonPauseFramesReceived),                      
5059     STATS_OFFSET32(stat_XoffPauseFramesReceived),                     
5060     STATS_OFFSET32(stat_OutXonSent),                                  
5061     STATS_OFFSET32(stat_OutXoffSent),                                 
5062     STATS_OFFSET32(stat_MacControlFramesReceived),                    
5063     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),                  
5064     STATS_OFFSET32(stat_IfInMBUFDiscards),                            
5065 };
5066
5067 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5068  * skipped because of errata.
5069  */               
5070 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5071         8,0,8,8,8,8,8,8,8,8,
5072         4,0,4,4,4,4,4,4,4,4,
5073         4,4,4,4,4,4,4,4,4,4,
5074         4,4,4,4,4,4,4,4,4,4,
5075         4,4,4,4,4,
5076 };
5077
5078 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5079         8,0,8,8,8,8,8,8,8,8,
5080         4,4,4,4,4,4,4,4,4,4,
5081         4,4,4,4,4,4,4,4,4,4,
5082         4,4,4,4,4,4,4,4,4,4,
5083         4,4,4,4,4,
5084 };
5085
5086 #define BNX2_NUM_TESTS 6
5087
5088 static struct {
5089         char string[ETH_GSTRING_LEN];
5090 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5091         { "register_test (offline)" },
5092         { "memory_test (offline)" },
5093         { "loopback_test (offline)" },
5094         { "nvram_test (online)" },
5095         { "interrupt_test (online)" },
5096         { "link_test (online)" },
5097 };
5098
5099 static int
5100 bnx2_self_test_count(struct net_device *dev)
5101 {
5102         return BNX2_NUM_TESTS;
5103 }
5104
5105 static void
5106 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5107 {
5108         struct bnx2 *bp = dev->priv;
5109
5110         memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5111         if (etest->flags & ETH_TEST_FL_OFFLINE) {
5112                 bnx2_netif_stop(bp);
5113                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5114                 bnx2_free_skbs(bp);
5115
5116                 if (bnx2_test_registers(bp) != 0) {
5117                         buf[0] = 1;
5118                         etest->flags |= ETH_TEST_FL_FAILED;
5119                 }
5120                 if (bnx2_test_memory(bp) != 0) {
5121                         buf[1] = 1;
5122                         etest->flags |= ETH_TEST_FL_FAILED;
5123                 }
5124                 if (bnx2_test_loopback(bp) != 0) {
5125                         buf[2] = 1;
5126                         etest->flags |= ETH_TEST_FL_FAILED;
5127                 }
5128
5129                 if (!netif_running(bp->dev)) {
5130                         bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5131                 }
5132                 else {
5133                         bnx2_init_nic(bp);
5134                         bnx2_netif_start(bp);
5135                 }
5136
5137                 /* wait for link up */
5138                 msleep_interruptible(3000);
5139                 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
5140                         msleep_interruptible(4000);
5141         }
5142
5143         if (bnx2_test_nvram(bp) != 0) {
5144                 buf[3] = 1;
5145                 etest->flags |= ETH_TEST_FL_FAILED;
5146         }
5147         if (bnx2_test_intr(bp) != 0) {
5148                 buf[4] = 1;
5149                 etest->flags |= ETH_TEST_FL_FAILED;
5150         }
5151
5152         if (bnx2_test_link(bp) != 0) {
5153                 buf[5] = 1;
5154                 etest->flags |= ETH_TEST_FL_FAILED;
5155
5156         }
5157 }
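
/*
 * Usage sketch (standard ethtool syntax): "ethtool -t ethX offline" runs
 * all six tests named in bnx2_tests_str_arr[], quiescing the device for
 * the register, memory and loopback tests; without the "offline" keyword
 * only the NVRAM, interrupt and link tests run.  A non-zero entry in the
 * result buffer marks the corresponding test as failed.
 */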
5158
5159 static void
5160 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5161 {
5162         switch (stringset) {
5163         case ETH_SS_STATS:
5164                 memcpy(buf, bnx2_stats_str_arr,
5165                         sizeof(bnx2_stats_str_arr));
5166                 break;
5167         case ETH_SS_TEST:
5168                 memcpy(buf, bnx2_tests_str_arr,
5169                         sizeof(bnx2_tests_str_arr));
5170                 break;
5171         }
5172 }
5173
5174 static int
5175 bnx2_get_stats_count(struct net_device *dev)
5176 {
5177         return BNX2_NUM_STATS;
5178 }
5179
5180 static void
5181 bnx2_get_ethtool_stats(struct net_device *dev,
5182                 struct ethtool_stats *stats, u64 *buf)
5183 {
5184         struct bnx2 *bp = dev->priv;
5185         int i;
5186         u32 *hw_stats = (u32 *) bp->stats_blk;
5187         u8 *stats_len_arr = NULL;
5188
5189         if (hw_stats == NULL) {
5190                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5191                 return;
5192         }
5193
5194         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5195             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5196             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5197             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5198                 stats_len_arr = bnx2_5706_stats_len_arr;
5199         else
5200                 stats_len_arr = bnx2_5708_stats_len_arr;
5201
5202         for (i = 0; i < BNX2_NUM_STATS; i++) {
5203                 if (stats_len_arr[i] == 0) {
5204                         /* skip this counter */
5205                         buf[i] = 0;
5206                         continue;
5207                 }
5208                 if (stats_len_arr[i] == 4) {
5209                         /* 4-byte counter */
5210                         buf[i] = (u64)
5211                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5212                         continue;
5213                 }
5214                 /* 8-byte counter */
5215                 buf[i] = (((u64) *(hw_stats +
5216                                         bnx2_stats_offset_arr[i])) << 32) +
5217                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5218         }
5219 }
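
/*
 * Worked example for the loop above: "rx_bytes" is entry 0 in
 * bnx2_stats_str_arr[], its offset entry points at stat_IfHCInOctets_hi
 * and its length entry is 8, so buf[0] is assembled as
 * ((u64) hi_word << 32) + lo_word.  Entries with a length of 0 are
 * counters skipped because of chip errata and always read back as 0.
 */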
5220
5221 static int
5222 bnx2_phys_id(struct net_device *dev, u32 data)
5223 {
5224         struct bnx2 *bp = dev->priv;
5225         int i;
5226         u32 save;
5227
5228         if (data == 0)
5229                 data = 2;
5230
5231         save = REG_RD(bp, BNX2_MISC_CFG);
5232         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5233
5234         for (i = 0; i < (data * 2); i++) {
5235                 if ((i % 2) == 0) {
5236                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5237                 }
5238                 else {
5239                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5240                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5241                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5242                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5243                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5244                                 BNX2_EMAC_LED_TRAFFIC);
5245                 }
5246                 msleep_interruptible(500);
5247                 if (signal_pending(current))
5248                         break;
5249         }
5250         REG_WR(bp, BNX2_EMAC_LED, 0);
5251         REG_WR(bp, BNX2_MISC_CFG, save);
5252         return 0;
5253 }
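
/*
 * Usage sketch (standard ethtool syntax): "ethtool -p ethX 5" identifies
 * the port by blinking the LED through bnx2_phys_id() above for roughly
 * 5 seconds, alternating the EMAC LED override state every 500 ms before
 * the original BNX2_MISC_CFG value is restored.  A count of 0 defaults to
 * 2 seconds.
 */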
5254
5255 static struct ethtool_ops bnx2_ethtool_ops = {
5256         .get_settings           = bnx2_get_settings,
5257         .set_settings           = bnx2_set_settings,
5258         .get_drvinfo            = bnx2_get_drvinfo,
5259         .get_wol                = bnx2_get_wol,
5260         .set_wol                = bnx2_set_wol,
5261         .nway_reset             = bnx2_nway_reset,
5262         .get_link               = ethtool_op_get_link,
5263         .get_eeprom_len         = bnx2_get_eeprom_len,
5264         .get_eeprom             = bnx2_get_eeprom,
5265         .set_eeprom             = bnx2_set_eeprom,
5266         .get_coalesce           = bnx2_get_coalesce,
5267         .set_coalesce           = bnx2_set_coalesce,
5268         .get_ringparam          = bnx2_get_ringparam,
5269         .set_ringparam          = bnx2_set_ringparam,
5270         .get_pauseparam         = bnx2_get_pauseparam,
5271         .set_pauseparam         = bnx2_set_pauseparam,
5272         .get_rx_csum            = bnx2_get_rx_csum,
5273         .set_rx_csum            = bnx2_set_rx_csum,
5274         .get_tx_csum            = ethtool_op_get_tx_csum,
5275         .set_tx_csum            = ethtool_op_set_tx_csum,
5276         .get_sg                 = ethtool_op_get_sg,
5277         .set_sg                 = ethtool_op_set_sg,
5278 #ifdef BCM_TSO
5279         .get_tso                = ethtool_op_get_tso,
5280         .set_tso                = ethtool_op_set_tso,
5281 #endif
5282         .self_test_count        = bnx2_self_test_count,
5283         .self_test              = bnx2_self_test,
5284         .get_strings            = bnx2_get_strings,
5285         .phys_id                = bnx2_phys_id,
5286         .get_stats_count        = bnx2_get_stats_count,
5287         .get_ethtool_stats      = bnx2_get_ethtool_stats,
5288         .get_perm_addr          = ethtool_op_get_perm_addr,
5289 };
5290
5291 /* Called with rtnl_lock */
5292 static int
5293 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5294 {
5295         struct mii_ioctl_data *data = if_mii(ifr);
5296         struct bnx2 *bp = dev->priv;
5297         int err;
5298
5299         switch(cmd) {
5300         case SIOCGMIIPHY:
5301                 data->phy_id = bp->phy_addr;
5302
5303                 /* fallthru */
5304         case SIOCGMIIREG: {
5305                 u32 mii_regval;
5306
5307                 spin_lock_bh(&bp->phy_lock);
5308                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5309                 spin_unlock_bh(&bp->phy_lock);
5310
5311                 data->val_out = mii_regval;
5312
5313                 return err;
5314         }
5315
5316         case SIOCSMIIREG:
5317                 if (!capable(CAP_NET_ADMIN))
5318                         return -EPERM;
5319
5320                 spin_lock_bh(&bp->phy_lock);
5321                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5322                 spin_unlock_bh(&bp->phy_lock);
5323
5324                 return err;
5325
5326         default:
5327                 /* do nothing */
5328                 break;
5329         }
5330         return -EOPNOTSUPP;
5331 }
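
/*
 * Usage sketch (assumption: conventional userspace MII ioctl access, not
 * code from this driver): tools such as mii-tool reach the PHY through the
 * ioctls above, roughly:
 *
 *   struct ifreq ifr;
 *   struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr.ifr_data;
 *   strcpy(ifr.ifr_name, "eth0");
 *   ioctl(fd, SIOCGMIIPHY, &ifr);      - PHY address returned in mii->phy_id
 *   mii->reg_num = MII_BMSR;
 *   ioctl(fd, SIOCGMIIREG, &ifr);      - register value in mii->val_out
 *
 * where fd is any AF_INET datagram socket.  Writes (SIOCSMIIREG) also
 * require CAP_NET_ADMIN.
 */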
5332
5333 /* Called with rtnl_lock */
5334 static int
5335 bnx2_change_mac_addr(struct net_device *dev, void *p)
5336 {
5337         struct sockaddr *addr = p;
5338         struct bnx2 *bp = dev->priv;
5339
5340         if (!is_valid_ether_addr(addr->sa_data))
5341                 return -EINVAL;
5342
5343         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5344         if (netif_running(dev))
5345                 bnx2_set_mac_addr(bp);
5346
5347         return 0;
5348 }
5349
5350 /* Called with rtnl_lock */
5351 static int
5352 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5353 {
5354         struct bnx2 *bp = dev->priv;
5355
5356         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5357                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5358                 return -EINVAL;
5359
5360         dev->mtu = new_mtu;
5361         if (netif_running(dev)) {
5362                 bnx2_netif_stop(bp);
5363
5364                 bnx2_init_nic(bp);
5365
5366                 bnx2_netif_start(bp);
5367         }
5368         return 0;
5369 }
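
/*
 * Illustration (hedged: exact limits depend on the MAX_ETHERNET_* constants
 * in bnx2.h): "ip link set ethX mtu 9000" is accepted as long as
 * 9000 + ETH_HLEN fits within MAX_ETHERNET_JUMBO_PACKET_SIZE, and a running
 * interface is torn down and re-initialized so the new buffer sizes apply.
 */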
5370
5371 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5372 static void
5373 poll_bnx2(struct net_device *dev)
5374 {
5375         struct bnx2 *bp = dev->priv;
5376
5377         disable_irq(bp->pdev->irq);
5378         bnx2_interrupt(bp->pdev->irq, dev, NULL);
5379         enable_irq(bp->pdev->irq);
5380 }
5381 #endif
5382
5383 static int __devinit
5384 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5385 {
5386         struct bnx2 *bp;
5387         unsigned long mem_len;
5388         int rc;
5389         u32 reg;
5390
5391         SET_MODULE_OWNER(dev);
5392         SET_NETDEV_DEV(dev, &pdev->dev);
5393         bp = dev->priv;
5394
5395         bp->flags = 0;
5396         bp->phy_flags = 0;
5397
5398         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5399         rc = pci_enable_device(pdev);
5400         if (rc) {
5401                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
5402                 goto err_out;
5403         }
5404
5405         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5406                 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5407                        "aborting.\n");
5408                 rc = -ENODEV;
5409                 goto err_out_disable;
5410         }
5411
5412         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5413         if (rc) {
5414                 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5415                 goto err_out_disable;
5416         }
5417
5418         pci_set_master(pdev);
5419
5420         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5421         if (bp->pm_cap == 0) {
5422                 printk(KERN_ERR PFX "Cannot find power management capability, "
5423                                "aborting.\n");
5424                 rc = -EIO;
5425                 goto err_out_release;
5426         }
5427
5428         bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5429         if (bp->pcix_cap == 0) {
5430                 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5431                 rc = -EIO;
5432                 goto err_out_release;
5433         }
5434
5435         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5436                 bp->flags |= USING_DAC_FLAG;
5437                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5438                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5439                                "failed, aborting.\n");
5440                         rc = -EIO;
5441                         goto err_out_release;
5442                 }
5443         }
5444         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5445                 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5446                 rc = -EIO;
5447                 goto err_out_release;
5448         }
5449
5450         bp->dev = dev;
5451         bp->pdev = pdev;
5452
5453         spin_lock_init(&bp->phy_lock);
5454         spin_lock_init(&bp->tx_lock);
5455         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5456
5457         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5458         mem_len = MB_GET_CID_ADDR(17);
5459         dev->mem_end = dev->mem_start + mem_len;
5460         dev->irq = pdev->irq;
5461
5462         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5463
5464         if (!bp->regview) {
5465                 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5466                 rc = -ENOMEM;
5467                 goto err_out_release;
5468         }
5469
5470         /* Configure byte swap and enable write to the reg_window registers.
5471          * Rely on the CPU to do target byte swapping on big endian systems;
5472          * the chip's target access swapping will not swap all accesses.
5473          */
5474         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5475                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5476                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5477
5478         bnx2_set_power_state(bp, PCI_D0);
5479
5480         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5481
5482         /* Get bus information. */
5483         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5484         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5485                 u32 clkreg;
5486
5487                 bp->flags |= PCIX_FLAG;
5488
5489                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5490                 
5491                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5492                 switch (clkreg) {
5493                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5494                         bp->bus_speed_mhz = 133;
5495                         break;
5496
5497                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5498                         bp->bus_speed_mhz = 100;
5499                         break;
5500
5501                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5502                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5503                         bp->bus_speed_mhz = 66;
5504                         break;
5505
5506                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5507                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5508                         bp->bus_speed_mhz = 50;
5509                         break;
5510
5511                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5512                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5513                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5514                         bp->bus_speed_mhz = 33;
5515                         break;
5516                 }
5517         }
5518         else {
5519                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5520                         bp->bus_speed_mhz = 66;
5521                 else
5522                         bp->bus_speed_mhz = 33;
5523         }
5524
5525         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5526                 bp->flags |= PCI_32BIT_FLAG;
5527
5528         /* 5706A0 may falsely detect SERR and PERR. */
5529         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5530                 reg = REG_RD(bp, PCI_COMMAND);
5531                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5532                 REG_WR(bp, PCI_COMMAND, reg);
5533         }
5534         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5535                 !(bp->flags & PCIX_FLAG)) {
5536
5537                 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5538                        "aborting.\n");
5539                 goto err_out_unmap;
5540         }
5541
5542         bnx2_init_nvram(bp);
5543
5544         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5545
5546         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5547             BNX2_SHM_HDR_SIGNATURE_SIG)
5548                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5549         else
5550                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5551
5552         /* Get the permanent MAC address.  First we need to make sure the
5553          * firmware is actually running.
5554          */
5555         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5556
5557         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5558             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5559                 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5560                 rc = -ENODEV;
5561                 goto err_out_unmap;
5562         }
5563
5564         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5565
5566         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5567         bp->mac_addr[0] = (u8) (reg >> 8);
5568         bp->mac_addr[1] = (u8) reg;
5569
5570         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5571         bp->mac_addr[2] = (u8) (reg >> 24);
5572         bp->mac_addr[3] = (u8) (reg >> 16);
5573         bp->mac_addr[4] = (u8) (reg >> 8);
5574         bp->mac_addr[5] = (u8) reg;
5575
5576         bp->tx_ring_size = MAX_TX_DESC_CNT;
5577         bp->rx_ring_size = 100;
5578
5579         bp->rx_csum = 1;
5580
5581         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5582
5583         bp->tx_quick_cons_trip_int = 20;
5584         bp->tx_quick_cons_trip = 20;
5585         bp->tx_ticks_int = 80;
5586         bp->tx_ticks = 80;
5587                 
5588         bp->rx_quick_cons_trip_int = 6;
5589         bp->rx_quick_cons_trip = 6;
5590         bp->rx_ticks_int = 18;
5591         bp->rx_ticks = 18;
5592
5593         bp->stats_ticks = 1000000 & 0xffff00;
5594
5595         bp->timer_interval =  HZ;
5596         bp->current_interval =  HZ;
5597
5598         bp->phy_addr = 1;
5599
5600         /* Disable WOL support if we are running on a SERDES chip. */
5601         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5602                 bp->phy_flags |= PHY_SERDES_FLAG;
5603                 bp->flags |= NO_WOL_FLAG;
5604                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5605                         bp->phy_addr = 2;
5606                         reg = REG_RD_IND(bp, bp->shmem_base +
5607                                          BNX2_SHARED_HW_CFG_CONFIG);
5608                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5609                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5610                 }
5611         }
5612
5613         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5614                 bp->tx_quick_cons_trip_int =
5615                         bp->tx_quick_cons_trip;
5616                 bp->tx_ticks_int = bp->tx_ticks;
5617                 bp->rx_quick_cons_trip_int =
5618                         bp->rx_quick_cons_trip;
5619                 bp->rx_ticks_int = bp->rx_ticks;
5620                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5621                 bp->com_ticks_int = bp->com_ticks;
5622                 bp->cmd_ticks_int = bp->cmd_ticks;
5623         }
5624
5625         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5626         bp->req_line_speed = 0;
5627         if (bp->phy_flags & PHY_SERDES_FLAG) {
5628                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5629
5630                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5631                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5632                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5633                         bp->autoneg = 0;
5634                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5635                         bp->req_duplex = DUPLEX_FULL;
5636                 }
5637         }
5638         else {
5639                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5640         }
5641
5642         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5643
5644         init_timer(&bp->timer);
5645         bp->timer.expires = RUN_AT(bp->timer_interval);
5646         bp->timer.data = (unsigned long) bp;
5647         bp->timer.function = bnx2_timer;
5648
5649         return 0;
5650
5651 err_out_unmap:
5652         if (bp->regview) {
5653                 iounmap(bp->regview);
5654                 bp->regview = NULL;
5655         }
5656
5657 err_out_release:
5658         pci_release_regions(pdev);
5659
5660 err_out_disable:
5661         pci_disable_device(pdev);
5662         pci_set_drvdata(pdev, NULL);
5663
5664 err_out:
5665         return rc;
5666 }
5667
5668 static int __devinit
5669 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5670 {
5671         static int version_printed = 0;
5672         struct net_device *dev = NULL;
5673         struct bnx2 *bp;
5674         int rc, i;
5675
5676         if (version_printed++ == 0)
5677                 printk(KERN_INFO "%s", version);
5678
5679         /* dev zeroed in alloc_etherdev */
5680         dev = alloc_etherdev(sizeof(*bp));
5681
5682         if (!dev)
5683                 return -ENOMEM;
5684
5685         rc = bnx2_init_board(pdev, dev);
5686         if (rc < 0) {
5687                 free_netdev(dev);
5688                 return rc;
5689         }
5690
5691         dev->open = bnx2_open;
5692         dev->hard_start_xmit = bnx2_start_xmit;
5693         dev->stop = bnx2_close;
5694         dev->get_stats = bnx2_get_stats;
5695         dev->set_multicast_list = bnx2_set_rx_mode;
5696         dev->do_ioctl = bnx2_ioctl;
5697         dev->set_mac_address = bnx2_change_mac_addr;
5698         dev->change_mtu = bnx2_change_mtu;
5699         dev->tx_timeout = bnx2_tx_timeout;
5700         dev->watchdog_timeo = TX_TIMEOUT;
5701 #ifdef BCM_VLAN
5702         dev->vlan_rx_register = bnx2_vlan_rx_register;
5703         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5704 #endif
5705         dev->poll = bnx2_poll;
5706         dev->ethtool_ops = &bnx2_ethtool_ops;
5707         dev->weight = 64;
5708
5709         bp = dev->priv;
5710
5711 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5712         dev->poll_controller = poll_bnx2;
5713 #endif
5714
5715         if ((rc = register_netdev(dev))) {
5716                 printk(KERN_ERR PFX "Cannot register net device\n");
5717                 if (bp->regview)
5718                         iounmap(bp->regview);
5719                 pci_release_regions(pdev);
5720                 pci_disable_device(pdev);
5721                 pci_set_drvdata(pdev, NULL);
5722                 free_netdev(dev);
5723                 return rc;
5724         }
5725
5726         pci_set_drvdata(pdev, dev);
5727
5728         memcpy(dev->dev_addr, bp->mac_addr, 6);
5729         memcpy(dev->perm_addr, bp->mac_addr, 6);
5730         bp->name = board_info[ent->driver_data].name;
5731         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5732                 "IRQ %d, ",
5733                 dev->name,
5734                 bp->name,
5735                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5736                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5737                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5738                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5739                 bp->bus_speed_mhz,
5740                 dev->base_addr,
5741                 bp->pdev->irq);
5742
5743         printk("node addr ");
5744         for (i = 0; i < 6; i++)
5745                 printk("%2.2x", dev->dev_addr[i]);
5746         printk("\n");
5747
5748         dev->features |= NETIF_F_SG;
5749         if (bp->flags & USING_DAC_FLAG)
5750                 dev->features |= NETIF_F_HIGHDMA;
5751         dev->features |= NETIF_F_IP_CSUM;
5752 #ifdef BCM_VLAN
5753         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5754 #endif
5755 #ifdef BCM_TSO
5756         dev->features |= NETIF_F_TSO;
5757 #endif
5758
5759         netif_carrier_off(bp->dev);
5760
5761         return 0;
5762 }
5763
5764 static void __devexit
5765 bnx2_remove_one(struct pci_dev *pdev)
5766 {
5767         struct net_device *dev = pci_get_drvdata(pdev);
5768         struct bnx2 *bp = dev->priv;
5769
5770         flush_scheduled_work();
5771
5772         unregister_netdev(dev);
5773
5774         if (bp->regview)
5775                 iounmap(bp->regview);
5776
5777         free_netdev(dev);
5778         pci_release_regions(pdev);
5779         pci_disable_device(pdev);
5780         pci_set_drvdata(pdev, NULL);
5781 }
5782
5783 static int
5784 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5785 {
5786         struct net_device *dev = pci_get_drvdata(pdev);
5787         struct bnx2 *bp = dev->priv;
5788         u32 reset_code;
5789
5790         if (!netif_running(dev))
5791                 return 0;
5792
5793         bnx2_netif_stop(bp);
5794         netif_device_detach(dev);
5795         del_timer_sync(&bp->timer);
5796         if (bp->wol)
5797                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5798         else
5799                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5800         bnx2_reset_chip(bp, reset_code);
5801         bnx2_free_skbs(bp);
5802         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
5803         return 0;
5804 }
5805
5806 static int
5807 bnx2_resume(struct pci_dev *pdev)
5808 {
5809         struct net_device *dev = pci_get_drvdata(pdev);
5810         struct bnx2 *bp = dev->priv;
5811
5812         if (!netif_running(dev))
5813                 return 0;
5814
5815         bnx2_set_power_state(bp, PCI_D0);
5816         netif_device_attach(dev);
5817         bnx2_init_nic(bp);
5818         bnx2_netif_start(bp);
5819         return 0;
5820 }
5821
5822 static struct pci_driver bnx2_pci_driver = {
5823         .name           = DRV_MODULE_NAME,
5824         .id_table       = bnx2_pci_tbl,
5825         .probe          = bnx2_init_one,
5826         .remove         = __devexit_p(bnx2_remove_one),
5827         .suspend        = bnx2_suspend,
5828         .resume         = bnx2_resume,
5829 };
5830
5831 static int __init bnx2_init(void)
5832 {
5833         return pci_module_init(&bnx2_pci_driver);
5834 }
5835
5836 static void __exit bnx2_cleanup(void)
5837 {
5838         pci_unregister_driver(&bnx2_pci_driver);
5839 }
5840
5841 module_init(bnx2_init);
5842 module_exit(bnx2_cleanup);
5843
5844
5845