1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.26"
65 #define DRV_MODULE_RELDATE      "April 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
236 static struct {
237         const char string[ETH_GSTRING_LEN];
238 } ethtool_stats_keys[TG3_NUM_STATS] = {
239         { "rx_octets" },
240         { "rx_fragments" },
241         { "rx_ucast_packets" },
242         { "rx_mcast_packets" },
243         { "rx_bcast_packets" },
244         { "rx_fcs_errors" },
245         { "rx_align_errors" },
246         { "rx_xon_pause_rcvd" },
247         { "rx_xoff_pause_rcvd" },
248         { "rx_mac_ctrl_rcvd" },
249         { "rx_xoff_entered" },
250         { "rx_frame_too_long_errors" },
251         { "rx_jabbers" },
252         { "rx_undersize_packets" },
253         { "rx_in_length_errors" },
254         { "rx_out_length_errors" },
255         { "rx_64_or_less_octet_packets" },
256         { "rx_65_to_127_octet_packets" },
257         { "rx_128_to_255_octet_packets" },
258         { "rx_256_to_511_octet_packets" },
259         { "rx_512_to_1023_octet_packets" },
260         { "rx_1024_to_1522_octet_packets" },
261         { "rx_1523_to_2047_octet_packets" },
262         { "rx_2048_to_4095_octet_packets" },
263         { "rx_4096_to_8191_octet_packets" },
264         { "rx_8192_to_9022_octet_packets" },
265
266         { "tx_octets" },
267         { "tx_collisions" },
268
269         { "tx_xon_sent" },
270         { "tx_xoff_sent" },
271         { "tx_flow_control" },
272         { "tx_mac_errors" },
273         { "tx_single_collisions" },
274         { "tx_mult_collisions" },
275         { "tx_deferred" },
276         { "tx_excessive_collisions" },
277         { "tx_late_collisions" },
278         { "tx_collide_2times" },
279         { "tx_collide_3times" },
280         { "tx_collide_4times" },
281         { "tx_collide_5times" },
282         { "tx_collide_6times" },
283         { "tx_collide_7times" },
284         { "tx_collide_8times" },
285         { "tx_collide_9times" },
286         { "tx_collide_10times" },
287         { "tx_collide_11times" },
288         { "tx_collide_12times" },
289         { "tx_collide_13times" },
290         { "tx_collide_14times" },
291         { "tx_collide_15times" },
292         { "tx_ucast_packets" },
293         { "tx_mcast_packets" },
294         { "tx_bcast_packets" },
295         { "tx_carrier_sense_errors" },
296         { "tx_discards" },
297         { "tx_errors" },
298
299         { "dma_writeq_full" },
300         { "dma_write_prioq_full" },
301         { "rxbds_empty" },
302         { "rx_discards" },
303         { "rx_errors" },
304         { "rx_threshold_hit" },
305
306         { "dma_readq_full" },
307         { "dma_read_prioq_full" },
308         { "tx_comp_queue_full" },
309
310         { "ring_set_send_prod_index" },
311         { "ring_status_update" },
312         { "nic_irqs" },
313         { "nic_avoided_irqs" },
314         { "nic_tx_threshold_hit" }
315 };
316
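/* Basic register write helper.  On chips with the PCI-X target
 * hardware bug the register is written through the PCI config-space
 * indirect window (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA) under
 * indirect_lock; otherwise it is a plain MMIO write, read back on
 * chips with the 5701 register-write bug.
 */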
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
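/* Mailbox write helpers: the TX variant writes the value twice on
 * chips with the TXD mailbox hardware bug, and both variants read the
 * mailbox back when the write-reorder workaround flag is set.
 */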
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
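/* Read/write NIC on-chip SRAM through the PCI memory window
 * registers; the window base is always restored to zero afterwards.
 */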
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
404
405 static void tg3_disable_ints(struct tg3 *tp)
406 {
407         tw32(TG3PCI_MISC_HOST_CTRL,
408              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
409         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
410         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
411 }
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
419 static void tg3_enable_ints(struct tg3 *tp)
420 {
421         tw32(TG3PCI_MISC_HOST_CTRL,
422              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
423         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
424         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
425
426         tg3_cond_int(tp);
427 }
428
429 /* tg3_restart_ints
430  *  Similar to tg3_enable_ints, but it may return without flushing
431  *  the PIO write that re-enables interrupts.
432  */
433 static void tg3_restart_ints(struct tg3 *tp)
434 {
435         tw32(TG3PCI_MISC_HOST_CTRL,
436                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
437         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
438         mmiowb();
439
440         tg3_cond_int(tp);
441 }
442
443 static inline void tg3_netif_stop(struct tg3 *tp)
444 {
445         netif_poll_disable(tp->dev);
446         netif_tx_disable(tp->dev);
447 }
448
449 static inline void tg3_netif_start(struct tg3 *tp)
450 {
451         netif_wake_queue(tp->dev);
452         /* NOTE: unconditional netif_wake_queue is only appropriate
453          * so long as all callers are assured to have free tx slots
454          * (such as after tg3_init_hw)
455          */
456         netif_poll_enable(tp->dev);
457         tg3_cond_int(tp);
458 }
459
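/* Reprogram TG3PCI_CLOCK_CTRL, stepping through intermediate core
 * clock settings (625 MHz on 5705+ parts, 44 MHz/ALTCLK on older
 * chips) with 40 us delays before the final selection takes effect.
 */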
460 static void tg3_switch_clocks(struct tg3 *tp)
461 {
462         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
463         u32 orig_clock_ctrl;
464
465         orig_clock_ctrl = clock_ctrl;
466         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
467                        CLOCK_CTRL_CLKRUN_OENABLE |
468                        0x1f);
469         tp->pci_clock_ctrl = clock_ctrl;
470
471         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
472                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
473                         tw32_f(TG3PCI_CLOCK_CTRL,
474                                clock_ctrl | CLOCK_CTRL_625_CORE);
475                         udelay(40);
476                 }
477         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
478                 tw32_f(TG3PCI_CLOCK_CTRL,
479                      clock_ctrl |
480                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
481                 udelay(40);
482                 tw32_f(TG3PCI_CLOCK_CTRL,
483                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
484                 udelay(40);
485         }
486         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
487         udelay(40);
488 }
489
490 #define PHY_BUSY_LOOPS  5000
491
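/* MII management helpers: MAC auto-polling is temporarily disabled,
 * the read or write frame is issued through MAC_MI_COM, and the code
 * busy-waits up to PHY_BUSY_LOOPS iterations for MI_COM_BUSY to
 * clear.  -EBUSY is returned on timeout.
 */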
492 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
493 {
494         u32 frame_val;
495         unsigned int loops;
496         int ret;
497
498         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
499                 tw32_f(MAC_MI_MODE,
500                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
501                 udelay(80);
502         }
503
504         *val = 0x0;
505
506         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
507                       MI_COM_PHY_ADDR_MASK);
508         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
509                       MI_COM_REG_ADDR_MASK);
510         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
511         
512         tw32_f(MAC_MI_COM, frame_val);
513
514         loops = PHY_BUSY_LOOPS;
515         while (loops != 0) {
516                 udelay(10);
517                 frame_val = tr32(MAC_MI_COM);
518
519                 if ((frame_val & MI_COM_BUSY) == 0) {
520                         udelay(5);
521                         frame_val = tr32(MAC_MI_COM);
522                         break;
523                 }
524                 loops -= 1;
525         }
526
527         ret = -EBUSY;
528         if (loops != 0) {
529                 *val = frame_val & MI_COM_DATA_MASK;
530                 ret = 0;
531         }
532
533         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
534                 tw32_f(MAC_MI_MODE, tp->mi_mode);
535                 udelay(80);
536         }
537
538         return ret;
539 }
540
541 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
542 {
543         u32 frame_val;
544         unsigned int loops;
545         int ret;
546
547         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
548                 tw32_f(MAC_MI_MODE,
549                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
550                 udelay(80);
551         }
552
553         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
554                       MI_COM_PHY_ADDR_MASK);
555         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
556                       MI_COM_REG_ADDR_MASK);
557         frame_val |= (val & MI_COM_DATA_MASK);
558         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
559         
560         tw32_f(MAC_MI_COM, frame_val);
561
562         loops = PHY_BUSY_LOOPS;
563         while (loops != 0) {
564                 udelay(10);
565                 frame_val = tr32(MAC_MI_COM);
566                 if ((frame_val & MI_COM_BUSY) == 0) {
567                         udelay(5);
568                         frame_val = tr32(MAC_MI_COM);
569                         break;
570                 }
571                 loops -= 1;
572         }
573
574         ret = -EBUSY;
575         if (loops != 0)
576                 ret = 0;
577
578         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
579                 tw32_f(MAC_MI_MODE, tp->mi_mode);
580                 udelay(80);
581         }
582
583         return ret;
584 }
585
586 static void tg3_phy_set_wirespeed(struct tg3 *tp)
587 {
588         u32 val;
589
590         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591                 return;
592
593         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
594             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
595                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
596                              (val | (1 << 15) | (1 << 4)));
597 }
598
599 static int tg3_bmcr_reset(struct tg3 *tp)
600 {
601         u32 phy_control;
602         int limit, err;
603
604         /* OK, reset it, and poll the BMCR_RESET bit until it
605          * clears or we time out.
606          */
607         phy_control = BMCR_RESET;
608         err = tg3_writephy(tp, MII_BMCR, phy_control);
609         if (err != 0)
610                 return -EBUSY;
611
612         limit = 5000;
613         while (limit--) {
614                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615                 if (err != 0)
616                         return -EBUSY;
617
618                 if ((phy_control & BMCR_RESET) == 0) {
619                         udelay(40);
620                         break;
621                 }
622                 udelay(10);
623         }
624         if (limit <= 0)
625                 return -EBUSY;
626
627         return 0;
628 }
629
630 static int tg3_wait_macro_done(struct tg3 *tp)
631 {
632         int limit = 100;
633
634         while (limit--) {
635                 u32 tmp32;
636
637                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
638                         if ((tmp32 & 0x1000) == 0)
639                                 break;
640                 }
641         }
642         if (limit <= 0)
643                 return -EBUSY;
644
645         return 0;
646 }
647
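/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A macro timeout requests another PHY reset via
 * *resetp; a readback mismatch fails with -EBUSY.
 */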
648 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
649 {
650         static const u32 test_pat[4][6] = {
651         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
652         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
653         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
654         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
655         };
656         int chan;
657
658         for (chan = 0; chan < 4; chan++) {
659                 int i;
660
661                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
662                              (chan * 0x2000) | 0x0200);
663                 tg3_writephy(tp, 0x16, 0x0002);
664
665                 for (i = 0; i < 6; i++)
666                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
667                                      test_pat[chan][i]);
668
669                 tg3_writephy(tp, 0x16, 0x0202);
670                 if (tg3_wait_macro_done(tp)) {
671                         *resetp = 1;
672                         return -EBUSY;
673                 }
674
675                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
676                              (chan * 0x2000) | 0x0200);
677                 tg3_writephy(tp, 0x16, 0x0082);
678                 if (tg3_wait_macro_done(tp)) {
679                         *resetp = 1;
680                         return -EBUSY;
681                 }
682
683                 tg3_writephy(tp, 0x16, 0x0802);
684                 if (tg3_wait_macro_done(tp)) {
685                         *resetp = 1;
686                         return -EBUSY;
687                 }
688
689                 for (i = 0; i < 6; i += 2) {
690                         u32 low, high;
691
692                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
693                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
694                             tg3_wait_macro_done(tp)) {
695                                 *resetp = 1;
696                                 return -EBUSY;
697                         }
698                         low &= 0x7fff;
699                         high &= 0x000f;
700                         if (low != test_pat[chan][i] ||
701                             high != test_pat[chan][i+1]) {
702                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
703                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
704                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
705
706                                 return -EBUSY;
707                         }
708                 }
709         }
710
711         return 0;
712 }
713
714 static int tg3_phy_reset_chanpat(struct tg3 *tp)
715 {
716         int chan;
717
718         for (chan = 0; chan < 4; chan++) {
719                 int i;
720
721                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722                              (chan * 0x2000) | 0x0200);
723                 tg3_writephy(tp, 0x16, 0x0002);
724                 for (i = 0; i < 6; i++)
725                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
726                 tg3_writephy(tp, 0x16, 0x0202);
727                 if (tg3_wait_macro_done(tp))
728                         return -EBUSY;
729         }
730
731         return 0;
732 }
733
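/* PHY reset sequence for 5703/5704/5705: retry the BMCR reset and DSP
 * test-pattern check up to ten times, then restore the transmitter,
 * interrupt and master-mode settings modified along the way.
 */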
734 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
735 {
736         u32 reg32, phy9_orig;
737         int retries, do_phy_reset, err;
738
739         retries = 10;
740         do_phy_reset = 1;
741         do {
742                 if (do_phy_reset) {
743                         err = tg3_bmcr_reset(tp);
744                         if (err)
745                                 return err;
746                         do_phy_reset = 0;
747                 }
748
749                 /* Disable transmitter and interrupt.  */
750                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
751                         continue;
752
753                 reg32 |= 0x3000;
754                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
755
756                 /* Set full-duplex, 1000 mbps.  */
757                 tg3_writephy(tp, MII_BMCR,
758                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
759
760                 /* Set to master mode.  */
761                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
762                         continue;
763
764                 tg3_writephy(tp, MII_TG3_CTRL,
765                              (MII_TG3_CTRL_AS_MASTER |
766                               MII_TG3_CTRL_ENABLE_AS_MASTER));
767
768                 /* Enable SM_DSP_CLOCK and 6dB.  */
769                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
770
771                 /* Block the PHY control access.  */
772                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
773                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
774
775                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
776                 if (!err)
777                         break;
778         } while (--retries);
779
780         err = tg3_phy_reset_chanpat(tp);
781         if (err)
782                 return err;
783
784         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
785         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
786
787         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
788         tg3_writephy(tp, 0x16, 0x0000);
789
790         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
791             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
792                 /* Set Extended packet length bit for jumbo frames */
793                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
794         }
795         else {
796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
797         }
798
799         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
800
801         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
802                 reg32 &= ~0x3000;
803                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
804         } else if (!err)
805                 err = -EBUSY;
806
807         return err;
808 }
809
810 /* Reset the tigon3 PHY and re-apply the chip-specific PHY
811  * workarounds.  Returns -EBUSY if the PHY does not respond.
812  */
813 static int tg3_phy_reset(struct tg3 *tp)
814 {
815         u32 phy_status;
816         int err;
817
818         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
819         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
820         if (err != 0)
821                 return -EBUSY;
822
823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
826                 err = tg3_phy_reset_5703_4_5(tp);
827                 if (err)
828                         return err;
829                 goto out;
830         }
831
832         err = tg3_bmcr_reset(tp);
833         if (err)
834                 return err;
835
836 out:
837         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
838                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
839                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
840                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
841                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
842                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
843                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
844         }
845         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
846                 tg3_writephy(tp, 0x1c, 0x8d68);
847                 tg3_writephy(tp, 0x1c, 0x8d68);
848         }
849         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
850                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
851                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
852                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
853                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
854                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
855                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
856                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
857                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
858         }
859         /* Set Extended packet length bit (bit 14) on all chips
860          * that support jumbo frames. */
861         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
862                 /* Cannot do read-modify-write on 5401 */
863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
864         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
865                 u32 phy_reg;
866
867                 /* Set bit 14 with read-modify-write to preserve other bits */
868                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
869                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
870                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
871         }
872
873         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
874          * jumbo frame transmission.
875          */
876         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
877                 u32 phy_reg;
878
879                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
880                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
881                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
882         }
883
884         tg3_phy_set_wirespeed(tp);
885         return 0;
886 }
887
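/* Configure the GPIOs that switch auxiliary (Vaux) power, taking WOL
 * into account; on 5704 dual-port boards the peer function's state is
 * consulted so that only one port drives the GPIOs.
 */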
888 static void tg3_frob_aux_power(struct tg3 *tp)
889 {
890         struct tg3 *tp_peer = tp;
891
892         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
893                 return;
894
895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
896                 tp_peer = pci_get_drvdata(tp->pdev_peer);
897                 if (!tp_peer)
898                         BUG();
899         }
900
901
902         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
903             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
906                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
907                              (GRC_LCLCTRL_GPIO_OE0 |
908                               GRC_LCLCTRL_GPIO_OE1 |
909                               GRC_LCLCTRL_GPIO_OE2 |
910                               GRC_LCLCTRL_GPIO_OUTPUT0 |
911                               GRC_LCLCTRL_GPIO_OUTPUT1));
912                         udelay(100);
913                 } else {
914                         u32 no_gpio2;
915                         u32 grc_local_ctrl;
916
917                         if (tp_peer != tp &&
918                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
919                                 return;
920
921                         /* On 5753 and variants, GPIO2 cannot be used. */
922                         no_gpio2 = tp->nic_sram_data_cfg &
923                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
924
925                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
926                                          GRC_LCLCTRL_GPIO_OE1 |
927                                          GRC_LCLCTRL_GPIO_OE2 |
928                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
929                                          GRC_LCLCTRL_GPIO_OUTPUT2;
930                         if (no_gpio2) {
931                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
932                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
933                         }
934                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
935                                                 grc_local_ctrl);
936                         udelay(100);
937
938                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
939
940                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
941                                                 grc_local_ctrl);
942                         udelay(100);
943
944                         if (!no_gpio2) {
945                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
946                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
947                                        grc_local_ctrl);
948                                 udelay(100);
949                         }
950                 }
951         } else {
952                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
953                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
954                         if (tp_peer != tp &&
955                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
956                                 return;
957
958                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
959                              (GRC_LCLCTRL_GPIO_OE1 |
960                               GRC_LCLCTRL_GPIO_OUTPUT1));
961                         udelay(100);
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                              (GRC_LCLCTRL_GPIO_OE1));
965                         udelay(100);
966
967                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
968                              (GRC_LCLCTRL_GPIO_OE1 |
969                               GRC_LCLCTRL_GPIO_OUTPUT1));
970                         udelay(100);
971                 }
972         }
973 }
974
975 static int tg3_setup_phy(struct tg3 *, int);
976
977 #define RESET_KIND_SHUTDOWN     0
978 #define RESET_KIND_INIT         1
979 #define RESET_KIND_SUSPEND      2
980
981 static void tg3_write_sig_post_reset(struct tg3 *, int);
982 static int tg3_halt_cpu(struct tg3 *, u32);
983
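/* Put the chip into the requested PCI power state (0 = D0 ... 3 = D3).
 * For the low-power states this drops the copper PHY to 10 Mb/s,
 * programs WOL magic-packet mode and clock gating as needed, sets up
 * auxiliary power, and finally writes the new state to PCI_PM_CTRL.
 */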
984 static int tg3_set_power_state(struct tg3 *tp, int state)
985 {
986         u32 misc_host_ctrl;
987         u16 power_control, power_caps;
988         int pm = tp->pm_cap;
989
990         /* Make sure register accesses (indirect or otherwise)
991          * will function correctly.
992          */
993         pci_write_config_dword(tp->pdev,
994                                TG3PCI_MISC_HOST_CTRL,
995                                tp->misc_host_ctrl);
996
997         pci_read_config_word(tp->pdev,
998                              pm + PCI_PM_CTRL,
999                              &power_control);
1000         power_control |= PCI_PM_CTRL_PME_STATUS;
1001         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1002         switch (state) {
1003         case 0:
1004                 power_control |= 0;
1005                 pci_write_config_word(tp->pdev,
1006                                       pm + PCI_PM_CTRL,
1007                                       power_control);
1008                 udelay(100);    /* Delay after power state change */
1009
1010                 /* Switch out of Vaux if it is not a LOM */
1011                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1012                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1013                         udelay(100);
1014                 }
1015
1016                 return 0;
1017
1018         case 1:
1019                 power_control |= 1;
1020                 break;
1021
1022         case 2:
1023                 power_control |= 2;
1024                 break;
1025
1026         case 3:
1027                 power_control |= 3;
1028                 break;
1029
1030         default:
1031                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1032                        "requested.\n",
1033                        tp->dev->name, state);
1034                 return -EINVAL;
1035         };
1036
1037         power_control |= PCI_PM_CTRL_PME_ENABLE;
1038
1039         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1040         tw32(TG3PCI_MISC_HOST_CTRL,
1041              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1042
1043         if (tp->link_config.phy_is_low_power == 0) {
1044                 tp->link_config.phy_is_low_power = 1;
1045                 tp->link_config.orig_speed = tp->link_config.speed;
1046                 tp->link_config.orig_duplex = tp->link_config.duplex;
1047                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1048         }
1049
1050         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1051                 tp->link_config.speed = SPEED_10;
1052                 tp->link_config.duplex = DUPLEX_HALF;
1053                 tp->link_config.autoneg = AUTONEG_ENABLE;
1054                 tg3_setup_phy(tp, 0);
1055         }
1056
1057         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1058
1059         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1060                 u32 mac_mode;
1061
1062                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1063                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1064                         udelay(40);
1065
1066                         mac_mode = MAC_MODE_PORT_MODE_MII;
1067
1068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1069                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1070                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1071                 } else {
1072                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1073                 }
1074
1075                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1076                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1077
1078                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1079                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1080                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1081
1082                 tw32_f(MAC_MODE, mac_mode);
1083                 udelay(100);
1084
1085                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1086                 udelay(10);
1087         }
1088
1089         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1090             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1092                 u32 base_val;
1093
1094                 base_val = tp->pci_clock_ctrl;
1095                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1096                              CLOCK_CTRL_TXCLK_DISABLE);
1097
1098                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1099                      CLOCK_CTRL_ALTCLK |
1100                      CLOCK_CTRL_PWRDOWN_PLL133);
1101                 udelay(40);
1102         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1103                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1104                 u32 newbits1, newbits2;
1105
1106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1107                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1108                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1109                                     CLOCK_CTRL_TXCLK_DISABLE |
1110                                     CLOCK_CTRL_ALTCLK);
1111                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1112                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1113                         newbits1 = CLOCK_CTRL_625_CORE;
1114                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1115                 } else {
1116                         newbits1 = CLOCK_CTRL_ALTCLK;
1117                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1118                 }
1119
1120                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1121                 udelay(40);
1122
1123                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1124                 udelay(40);
1125
1126                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1127                         u32 newbits3;
1128
1129                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1130                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1131                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1132                                             CLOCK_CTRL_TXCLK_DISABLE |
1133                                             CLOCK_CTRL_44MHZ_CORE);
1134                         } else {
1135                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1136                         }
1137
1138                         tw32_f(TG3PCI_CLOCK_CTRL,
1139                                          tp->pci_clock_ctrl | newbits3);
1140                         udelay(40);
1141                 }
1142         }
1143
1144         tg3_frob_aux_power(tp);
1145
1146         /* Workaround for unstable PLL clock */
1147         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1148             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1149                 u32 val = tr32(0x7d00);
1150
1151                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1152                 tw32(0x7d00, val);
1153                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1154                         tg3_halt_cpu(tp, RX_CPU_BASE);
1155         }
1156
1157         /* Finally, set the new power state. */
1158         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1159         udelay(100);    /* Delay after power state change */
1160
1161         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1162
1163         return 0;
1164 }
1165
1166 static void tg3_link_report(struct tg3 *tp)
1167 {
1168         if (!netif_carrier_ok(tp->dev)) {
1169                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1170         } else {
1171                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1172                        tp->dev->name,
1173                        (tp->link_config.active_speed == SPEED_1000 ?
1174                         1000 :
1175                         (tp->link_config.active_speed == SPEED_100 ?
1176                          100 : 10)),
1177                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1178                         "full" : "half"));
1179
1180                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1181                        "%s for RX.\n",
1182                        tp->dev->name,
1183                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1184                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1185         }
1186 }
1187
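/* Resolve 802.3x TX/RX pause settings from the local and link-partner
 * autoneg advertisements and update MAC_RX_MODE/MAC_TX_MODE if the
 * result changed.
 */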
1188 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1189 {
1190         u32 new_tg3_flags = 0;
1191         u32 old_rx_mode = tp->rx_mode;
1192         u32 old_tx_mode = tp->tx_mode;
1193
1194         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1195                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1196                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1197                                 if (remote_adv & LPA_PAUSE_CAP)
1198                                         new_tg3_flags |=
1199                                                 (TG3_FLAG_RX_PAUSE |
1200                                                 TG3_FLAG_TX_PAUSE);
1201                                 else if (remote_adv & LPA_PAUSE_ASYM)
1202                                         new_tg3_flags |=
1203                                                 (TG3_FLAG_RX_PAUSE);
1204                         } else {
1205                                 if (remote_adv & LPA_PAUSE_CAP)
1206                                         new_tg3_flags |=
1207                                                 (TG3_FLAG_RX_PAUSE |
1208                                                 TG3_FLAG_TX_PAUSE);
1209                         }
1210                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1211                         if ((remote_adv & LPA_PAUSE_CAP) &&
1212                         (remote_adv & LPA_PAUSE_ASYM))
1213                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1214                 }
1215
1216                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1217                 tp->tg3_flags |= new_tg3_flags;
1218         } else {
1219                 new_tg3_flags = tp->tg3_flags;
1220         }
1221
1222         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1223                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1224         else
1225                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1226
1227         if (old_rx_mode != tp->rx_mode) {
1228                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1229         }
1230         
1231         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1232                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1233         else
1234                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1235
1236         if (old_tx_mode != tp->tx_mode) {
1237                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1238         }
1239 }
1240
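/* Decode the PHY AUX status word into link speed and duplex. */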
1241 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1242 {
1243         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1244         case MII_TG3_AUX_STAT_10HALF:
1245                 *speed = SPEED_10;
1246                 *duplex = DUPLEX_HALF;
1247                 break;
1248
1249         case MII_TG3_AUX_STAT_10FULL:
1250                 *speed = SPEED_10;
1251                 *duplex = DUPLEX_FULL;
1252                 break;
1253
1254         case MII_TG3_AUX_STAT_100HALF:
1255                 *speed = SPEED_100;
1256                 *duplex = DUPLEX_HALF;
1257                 break;
1258
1259         case MII_TG3_AUX_STAT_100FULL:
1260                 *speed = SPEED_100;
1261                 *duplex = DUPLEX_FULL;
1262                 break;
1263
1264         case MII_TG3_AUX_STAT_1000HALF:
1265                 *speed = SPEED_1000;
1266                 *duplex = DUPLEX_HALF;
1267                 break;
1268
1269         case MII_TG3_AUX_STAT_1000FULL:
1270                 *speed = SPEED_1000;
1271                 *duplex = DUPLEX_FULL;
1272                 break;
1273
1274         default:
1275                 *speed = SPEED_INVALID;
1276                 *duplex = DUPLEX_INVALID;
1277                 break;
1278         };
1279 }
1280
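/* Program the copper PHY advertisement registers and restart
 * autonegotiation, or force a fixed speed/duplex via BMCR when
 * autoneg is disabled.
 */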
1281 static void tg3_phy_copper_begin(struct tg3 *tp)
1282 {
1283         u32 new_adv;
1284         int i;
1285
1286         if (tp->link_config.phy_is_low_power) {
1287                 /* Entering low power mode.  Disable gigabit and
1288                  * 100baseT advertisements.
1289                  */
1290                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1291
1292                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1293                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1294                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1295                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1296
1297                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1298         } else if (tp->link_config.speed == SPEED_INVALID) {
1299                 tp->link_config.advertising =
1300                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1301                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1302                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1303                          ADVERTISED_Autoneg | ADVERTISED_MII);
1304
1305                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1306                         tp->link_config.advertising &=
1307                                 ~(ADVERTISED_1000baseT_Half |
1308                                   ADVERTISED_1000baseT_Full);
1309
1310                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1311                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1312                         new_adv |= ADVERTISE_10HALF;
1313                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1314                         new_adv |= ADVERTISE_10FULL;
1315                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1316                         new_adv |= ADVERTISE_100HALF;
1317                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1318                         new_adv |= ADVERTISE_100FULL;
1319                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1320
1321                 if (tp->link_config.advertising &
1322                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1323                         new_adv = 0;
1324                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1325                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1326                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1327                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1328                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1329                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1330                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1331                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1332                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1333                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1334                 } else {
1335                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1336                 }
1337         } else {
1338                 /* Asking for a specific link mode. */
1339                 if (tp->link_config.speed == SPEED_1000) {
1340                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1341                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1342
1343                         if (tp->link_config.duplex == DUPLEX_FULL)
1344                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1345                         else
1346                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1347                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1348                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1349                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1350                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1351                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1352                 } else {
1353                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1354
1355                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1356                         if (tp->link_config.speed == SPEED_100) {
1357                                 if (tp->link_config.duplex == DUPLEX_FULL)
1358                                         new_adv |= ADVERTISE_100FULL;
1359                                 else
1360                                         new_adv |= ADVERTISE_100HALF;
1361                         } else {
1362                                 if (tp->link_config.duplex == DUPLEX_FULL)
1363                                         new_adv |= ADVERTISE_10FULL;
1364                                 else
1365                                         new_adv |= ADVERTISE_10HALF;
1366                         }
1367                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1368                 }
1369         }
1370
1371         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1372             tp->link_config.speed != SPEED_INVALID) {
1373                 u32 bmcr, orig_bmcr;
1374
1375                 tp->link_config.active_speed = tp->link_config.speed;
1376                 tp->link_config.active_duplex = tp->link_config.duplex;
1377
1378                 bmcr = 0;
1379                 switch (tp->link_config.speed) {
1380                 default:
1381                 case SPEED_10:
1382                         break;
1383
1384                 case SPEED_100:
1385                         bmcr |= BMCR_SPEED100;
1386                         break;
1387
1388                 case SPEED_1000:
1389                         bmcr |= TG3_BMCR_SPEED1000;
1390                         break;
1391                 }
1392
1393                 if (tp->link_config.duplex == DUPLEX_FULL)
1394                         bmcr |= BMCR_FULLDPLX;
1395
1396                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1397                     (bmcr != orig_bmcr)) {
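                             /* Briefly force the link down via PHY loopback,
                              * wait for the link-status bit to clear, then
                              * program the requested speed/duplex in BMCR.
                              */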
1398                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1399                         for (i = 0; i < 1500; i++) {
1400                                 u32 tmp;
1401
1402                                 udelay(10);
1403                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1404                                     tg3_readphy(tp, MII_BMSR, &tmp))
1405                                         continue;
1406                                 if (!(tmp & BMSR_LSTATUS)) {
1407                                         udelay(40);
1408                                         break;
1409                                 }
1410                         }
1411                         tg3_writephy(tp, MII_BMCR, bmcr);
1412                         udelay(40);
1413                 }
1414         } else {
1415                 tg3_writephy(tp, MII_BMCR,
1416                              BMCR_ANENABLE | BMCR_ANRESTART);
1417         }
1418 }
1419
1420 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1421 {
1422         int err;
1423
1424         /* Turn off tap power management. */
1425         /* Set Extended packet length bit */
1426         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1427
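             /* Each DSP register access below is a two-step MII write:
              * first the DSP address, then the value via the R/W port.
              */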
1428         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1429         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1430
1431         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1432         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1433
1434         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1435         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1436
1437         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1438         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1439
1440         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1441         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1442
1443         udelay(40);
1444
1445         return err;
1446 }
1447
1448 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1449 {
1450         u32 adv_reg, all_mask;
1451
1452         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1453                 return 0;
1454
1455         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1456                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1457         if ((adv_reg & all_mask) != all_mask)
1458                 return 0;
1459         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1460                 u32 tg3_ctrl;
1461
1462                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1463                         return 0;
1464
1465                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1466                             MII_TG3_CTRL_ADV_1000_FULL);
1467                 if ((tg3_ctrl & all_mask) != all_mask)
1468                         return 0;
1469         }
1470         return 1;
1471 }
1472
1473 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1474 {
1475         int current_link_up;
1476         u32 bmsr, dummy;
1477         u16 current_speed;
1478         u8 current_duplex;
1479         int i, err;
1480
1481         tw32(MAC_EVENT, 0);
1482
1483         tw32_f(MAC_STATUS,
1484              (MAC_STATUS_SYNC_CHANGED |
1485               MAC_STATUS_CFG_CHANGED |
1486               MAC_STATUS_MI_COMPLETION |
1487               MAC_STATUS_LNKSTATE_CHANGED));
1488         udelay(40);
1489
1490         tp->mi_mode = MAC_MI_MODE_BASE;
1491         tw32_f(MAC_MI_MODE, tp->mi_mode);
1492         udelay(80);
1493
1494         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1495
1496         /* Some third-party PHYs need to be reset on link going
1497          * down.
1498          */
1499         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1500              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1501              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1502             netif_carrier_ok(tp->dev)) {
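                     /* BMSR latches link-down events; read it twice so the
                      * second read reflects the current link state.
                      */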
1503                 tg3_readphy(tp, MII_BMSR, &bmsr);
1504                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1505                     !(bmsr & BMSR_LSTATUS))
1506                         force_reset = 1;
1507         }
1508         if (force_reset)
1509                 tg3_phy_reset(tp);
1510
1511         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1512                 tg3_readphy(tp, MII_BMSR, &bmsr);
1513                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1514                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1515                         bmsr = 0;
1516
1517                 if (!(bmsr & BMSR_LSTATUS)) {
1518                         err = tg3_init_5401phy_dsp(tp);
1519                         if (err)
1520                                 return err;
1521
1522                         tg3_readphy(tp, MII_BMSR, &bmsr);
1523                         for (i = 0; i < 1000; i++) {
1524                                 udelay(10);
1525                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1526                                     (bmsr & BMSR_LSTATUS)) {
1527                                         udelay(40);
1528                                         break;
1529                                 }
1530                         }
1531
1532                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1533                             !(bmsr & BMSR_LSTATUS) &&
1534                             tp->link_config.active_speed == SPEED_1000) {
1535                                 err = tg3_phy_reset(tp);
1536                                 if (!err)
1537                                         err = tg3_init_5401phy_dsp(tp);
1538                                 if (err)
1539                                         return err;
1540                         }
1541                 }
1542         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1544                 /* 5701 {A0,B0} CRC bug workaround */
1545                 tg3_writephy(tp, 0x15, 0x0a75);
1546                 tg3_writephy(tp, 0x1c, 0x8c68);
1547                 tg3_writephy(tp, 0x1c, 0x8d68);
1548                 tg3_writephy(tp, 0x1c, 0x8c68);
1549         }
1550
1551         /* Clear pending interrupts... */
1552         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1553         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1554
1555         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1556                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1557         else
1558                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1559
1560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1561             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1562                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1563                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1564                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1565                 else
1566                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1567         }
1568
1569         current_link_up = 0;
1570         current_speed = SPEED_INVALID;
1571         current_duplex = DUPLEX_INVALID;
1572
1573         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1574                 u32 val;
1575
1576                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1577                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1578                 if (!(val & (1 << 10))) {
1579                         val |= (1 << 10);
1580                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1581                         goto relink;
1582                 }
1583         }
1584
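             /* Poll for link-up for up to ~4 ms (100 passes, 40 us apart);
              * BMSR is read twice per pass because link-down is latched.
              */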
1585         bmsr = 0;
1586         for (i = 0; i < 100; i++) {
1587                 tg3_readphy(tp, MII_BMSR, &bmsr);
1588                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1589                     (bmsr & BMSR_LSTATUS))
1590                         break;
1591                 udelay(40);
1592         }
1593
1594         if (bmsr & BMSR_LSTATUS) {
1595                 u32 aux_stat, bmcr;
1596
1597                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1598                 for (i = 0; i < 2000; i++) {
1599                         udelay(10);
1600                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1601                             aux_stat)
1602                                 break;
1603                 }
1604
1605                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1606                                              &current_speed,
1607                                              &current_duplex);
1608
1609                 bmcr = 0;
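                     /* Re-read BMCR until it returns something other than
                      * 0 or 0x7fff, which are treated here as invalid reads.
                      */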
1610                 for (i = 0; i < 200; i++) {
1611                         tg3_readphy(tp, MII_BMCR, &bmcr);
1612                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1613                                 continue;
1614                         if (bmcr && bmcr != 0x7fff)
1615                                 break;
1616                         udelay(10);
1617                 }
1618
1619                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1620                         if (bmcr & BMCR_ANENABLE) {
1621                                 current_link_up = 1;
1622
1623                                 /* Force autoneg restart if we are exiting
1624                                  * low power mode.
1625                                  */
1626                                 if (!tg3_copper_is_advertising_all(tp))
1627                                         current_link_up = 0;
1628                         } else {
1629                                 current_link_up = 0;
1630                         }
1631                 } else {
1632                         if (!(bmcr & BMCR_ANENABLE) &&
1633                             tp->link_config.speed == current_speed &&
1634                             tp->link_config.duplex == current_duplex) {
1635                                 current_link_up = 1;
1636                         } else {
1637                                 current_link_up = 0;
1638                         }
1639                 }
1640
1641                 tp->link_config.active_speed = current_speed;
1642                 tp->link_config.active_duplex = current_duplex;
1643         }
1644
1645         if (current_link_up == 1 &&
1646             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1647             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1648                 u32 local_adv, remote_adv;
1649
1650                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1651                         local_adv = 0;
1652                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1653
1654                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1655                         remote_adv = 0;
1656
1657                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1658
1659                 /* If we are not advertising full pause capability,
1660                  * something is wrong.  Bring the link down and reconfigure.
1661                  */
1662                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1663                         current_link_up = 0;
1664                 } else {
1665                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1666                 }
1667         }
1668 relink:
1669         if (current_link_up == 0) {
1670                 u32 tmp;
1671
1672                 tg3_phy_copper_begin(tp);
1673
1674                 tg3_readphy(tp, MII_BMSR, &tmp);
1675                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1676                     (tmp & BMSR_LSTATUS))
1677                         current_link_up = 1;
1678         }
1679
1680         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1681         if (current_link_up == 1) {
1682                 if (tp->link_config.active_speed == SPEED_100 ||
1683                     tp->link_config.active_speed == SPEED_10)
1684                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1685                 else
1686                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1687         } else
1688                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1689
1690         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1691         if (tp->link_config.active_duplex == DUPLEX_HALF)
1692                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1693
1694         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1696                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1697                     (current_link_up == 1 &&
1698                      tp->link_config.active_speed == SPEED_10))
1699                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1700         } else {
1701                 if (current_link_up == 1)
1702                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1703         }
1704
1705         /* ??? Without this setting Netgear GA302T PHY does not
1706          * ??? send/receive packets...
1707          */
1708         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1709             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1710                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1711                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1712                 udelay(80);
1713         }
1714
1715         tw32_f(MAC_MODE, tp->mac_mode);
1716         udelay(40);
1717
1718         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1719                 /* Polled via timer. */
1720                 tw32_f(MAC_EVENT, 0);
1721         } else {
1722                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1723         }
1724         udelay(40);
1725
1726         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1727             current_link_up == 1 &&
1728             tp->link_config.active_speed == SPEED_1000 &&
1729             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1730              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1731                 udelay(120);
1732                 tw32_f(MAC_STATUS,
1733                      (MAC_STATUS_SYNC_CHANGED |
1734                       MAC_STATUS_CFG_CHANGED));
1735                 udelay(40);
1736                 tg3_write_mem(tp,
1737                               NIC_SRAM_FIRMWARE_MBOX,
1738                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1739         }
1740
1741         if (current_link_up != netif_carrier_ok(tp->dev)) {
1742                 if (current_link_up)
1743                         netif_carrier_on(tp->dev);
1744                 else
1745                         netif_carrier_off(tp->dev);
1746                 tg3_link_report(tp);
1747         }
1748
1749         return 0;
1750 }
1751
1752 struct tg3_fiber_aneginfo {
1753         int state;
1754 #define ANEG_STATE_UNKNOWN              0
1755 #define ANEG_STATE_AN_ENABLE            1
1756 #define ANEG_STATE_RESTART_INIT         2
1757 #define ANEG_STATE_RESTART              3
1758 #define ANEG_STATE_DISABLE_LINK_OK      4
1759 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1760 #define ANEG_STATE_ABILITY_DETECT       6
1761 #define ANEG_STATE_ACK_DETECT_INIT      7
1762 #define ANEG_STATE_ACK_DETECT           8
1763 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1764 #define ANEG_STATE_COMPLETE_ACK         10
1765 #define ANEG_STATE_IDLE_DETECT_INIT     11
1766 #define ANEG_STATE_IDLE_DETECT          12
1767 #define ANEG_STATE_LINK_OK              13
1768 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1769 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1770
1771         u32 flags;
1772 #define MR_AN_ENABLE            0x00000001
1773 #define MR_RESTART_AN           0x00000002
1774 #define MR_AN_COMPLETE          0x00000004
1775 #define MR_PAGE_RX              0x00000008
1776 #define MR_NP_LOADED            0x00000010
1777 #define MR_TOGGLE_TX            0x00000020
1778 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1779 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1780 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1781 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1782 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1783 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1784 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1785 #define MR_TOGGLE_RX            0x00002000
1786 #define MR_NP_RX                0x00004000
1787
1788 #define MR_LINK_OK              0x80000000
1789
1790         unsigned long link_time, cur_time;
1791
1792         u32 ability_match_cfg;
1793         int ability_match_count;
1794
1795         char ability_match, idle_match, ack_match;
1796
1797         u32 txconfig, rxconfig;
1798 #define ANEG_CFG_NP             0x00000080
1799 #define ANEG_CFG_ACK            0x00000040
1800 #define ANEG_CFG_RF2            0x00000020
1801 #define ANEG_CFG_RF1            0x00000010
1802 #define ANEG_CFG_PS2            0x00000001
1803 #define ANEG_CFG_PS1            0x00008000
1804 #define ANEG_CFG_HD             0x00004000
1805 #define ANEG_CFG_FD             0x00002000
1806 #define ANEG_CFG_INVAL          0x00001f06
1807
1808 };
1809 #define ANEG_OK         0
1810 #define ANEG_DONE       1
1811 #define ANEG_TIMER_ENAB 2
1812 #define ANEG_FAILED     -1
1813
1814 #define ANEG_STATE_SETTLE_TIME  10000
1815
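     /* Software state machine for 1000BASE-X (IEEE 802.3 clause 37) style
      * autonegotiation over the MAC's TX/RX config registers.  fiber_autoneg()
      * below steps it roughly once per microsecond, so cur_time/link_time are
      * ~1 us ticks and ANEG_STATE_SETTLE_TIME corresponds to about 10 ms.
      */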
1816 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1817                                    struct tg3_fiber_aneginfo *ap)
1818 {
1819         unsigned long delta;
1820         u32 rx_cfg_reg;
1821         int ret;
1822
1823         if (ap->state == ANEG_STATE_UNKNOWN) {
1824                 ap->rxconfig = 0;
1825                 ap->link_time = 0;
1826                 ap->cur_time = 0;
1827                 ap->ability_match_cfg = 0;
1828                 ap->ability_match_count = 0;
1829                 ap->ability_match = 0;
1830                 ap->idle_match = 0;
1831                 ap->ack_match = 0;
1832         }
1833         ap->cur_time++;
1834
1835         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1836                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1837
1838                 if (rx_cfg_reg != ap->ability_match_cfg) {
1839                         ap->ability_match_cfg = rx_cfg_reg;
1840                         ap->ability_match = 0;
1841                         ap->ability_match_count = 0;
1842                 } else {
1843                         if (++ap->ability_match_count > 1) {
1844                                 ap->ability_match = 1;
1845                                 ap->ability_match_cfg = rx_cfg_reg;
1846                         }
1847                 }
1848                 if (rx_cfg_reg & ANEG_CFG_ACK)
1849                         ap->ack_match = 1;
1850                 else
1851                         ap->ack_match = 0;
1852
1853                 ap->idle_match = 0;
1854         } else {
1855                 ap->idle_match = 1;
1856                 ap->ability_match_cfg = 0;
1857                 ap->ability_match_count = 0;
1858                 ap->ability_match = 0;
1859                 ap->ack_match = 0;
1860
1861                 rx_cfg_reg = 0;
1862         }
1863
1864         ap->rxconfig = rx_cfg_reg;
1865         ret = ANEG_OK;
1866
1867         switch(ap->state) {
1868         case ANEG_STATE_UNKNOWN:
1869                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1870                         ap->state = ANEG_STATE_AN_ENABLE;
1871
1872                 /* fallthru */
1873         case ANEG_STATE_AN_ENABLE:
1874                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1875                 if (ap->flags & MR_AN_ENABLE) {
1876                         ap->link_time = 0;
1877                         ap->cur_time = 0;
1878                         ap->ability_match_cfg = 0;
1879                         ap->ability_match_count = 0;
1880                         ap->ability_match = 0;
1881                         ap->idle_match = 0;
1882                         ap->ack_match = 0;
1883
1884                         ap->state = ANEG_STATE_RESTART_INIT;
1885                 } else {
1886                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1887                 }
1888                 break;
1889
1890         case ANEG_STATE_RESTART_INIT:
1891                 ap->link_time = ap->cur_time;
1892                 ap->flags &= ~(MR_NP_LOADED);
1893                 ap->txconfig = 0;
1894                 tw32(MAC_TX_AUTO_NEG, 0);
1895                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1896                 tw32_f(MAC_MODE, tp->mac_mode);
1897                 udelay(40);
1898
1899                 ret = ANEG_TIMER_ENAB;
1900                 ap->state = ANEG_STATE_RESTART;
1901
1902                 /* fallthru */
1903         case ANEG_STATE_RESTART:
1904                 delta = ap->cur_time - ap->link_time;
1905                 if (delta > ANEG_STATE_SETTLE_TIME) {
1906                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1907                 } else {
1908                         ret = ANEG_TIMER_ENAB;
1909                 }
1910                 break;
1911
1912         case ANEG_STATE_DISABLE_LINK_OK:
1913                 ret = ANEG_DONE;
1914                 break;
1915
1916         case ANEG_STATE_ABILITY_DETECT_INIT:
1917                 ap->flags &= ~(MR_TOGGLE_TX);
1918                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1919                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1920                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1921                 tw32_f(MAC_MODE, tp->mac_mode);
1922                 udelay(40);
1923
1924                 ap->state = ANEG_STATE_ABILITY_DETECT;
1925                 break;
1926
1927         case ANEG_STATE_ABILITY_DETECT:
1928                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1929                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1930                 }
1931                 break;
1932
1933         case ANEG_STATE_ACK_DETECT_INIT:
1934                 ap->txconfig |= ANEG_CFG_ACK;
1935                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1936                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1937                 tw32_f(MAC_MODE, tp->mac_mode);
1938                 udelay(40);
1939
1940                 ap->state = ANEG_STATE_ACK_DETECT;
1941
1942                 /* fallthru */
1943         case ANEG_STATE_ACK_DETECT:
1944                 if (ap->ack_match != 0) {
1945                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1946                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1947                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1948                         } else {
1949                                 ap->state = ANEG_STATE_AN_ENABLE;
1950                         }
1951                 } else if (ap->ability_match != 0 &&
1952                            ap->rxconfig == 0) {
1953                         ap->state = ANEG_STATE_AN_ENABLE;
1954                 }
1955                 break;
1956
1957         case ANEG_STATE_COMPLETE_ACK_INIT:
1958                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1959                         ret = ANEG_FAILED;
1960                         break;
1961                 }
1962                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1963                                MR_LP_ADV_HALF_DUPLEX |
1964                                MR_LP_ADV_SYM_PAUSE |
1965                                MR_LP_ADV_ASYM_PAUSE |
1966                                MR_LP_ADV_REMOTE_FAULT1 |
1967                                MR_LP_ADV_REMOTE_FAULT2 |
1968                                MR_LP_ADV_NEXT_PAGE |
1969                                MR_TOGGLE_RX |
1970                                MR_NP_RX);
1971                 if (ap->rxconfig & ANEG_CFG_FD)
1972                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1973                 if (ap->rxconfig & ANEG_CFG_HD)
1974                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1975                 if (ap->rxconfig & ANEG_CFG_PS1)
1976                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1977                 if (ap->rxconfig & ANEG_CFG_PS2)
1978                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1979                 if (ap->rxconfig & ANEG_CFG_RF1)
1980                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1981                 if (ap->rxconfig & ANEG_CFG_RF2)
1982                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1983                 if (ap->rxconfig & ANEG_CFG_NP)
1984                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1985
1986                 ap->link_time = ap->cur_time;
1987
1988                 ap->flags ^= (MR_TOGGLE_TX);
1989                 if (ap->rxconfig & 0x0008)
1990                         ap->flags |= MR_TOGGLE_RX;
1991                 if (ap->rxconfig & ANEG_CFG_NP)
1992                         ap->flags |= MR_NP_RX;
1993                 ap->flags |= MR_PAGE_RX;
1994
1995                 ap->state = ANEG_STATE_COMPLETE_ACK;
1996                 ret = ANEG_TIMER_ENAB;
1997                 break;
1998
1999         case ANEG_STATE_COMPLETE_ACK:
2000                 if (ap->ability_match != 0 &&
2001                     ap->rxconfig == 0) {
2002                         ap->state = ANEG_STATE_AN_ENABLE;
2003                         break;
2004                 }
2005                 delta = ap->cur_time - ap->link_time;
2006                 if (delta > ANEG_STATE_SETTLE_TIME) {
2007                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2008                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2009                         } else {
2010                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2011                                     !(ap->flags & MR_NP_RX)) {
2012                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2013                                 } else {
2014                                         ret = ANEG_FAILED;
2015                                 }
2016                         }
2017                 }
2018                 break;
2019
2020         case ANEG_STATE_IDLE_DETECT_INIT:
2021                 ap->link_time = ap->cur_time;
2022                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2023                 tw32_f(MAC_MODE, tp->mac_mode);
2024                 udelay(40);
2025
2026                 ap->state = ANEG_STATE_IDLE_DETECT;
2027                 ret = ANEG_TIMER_ENAB;
2028                 break;
2029
2030         case ANEG_STATE_IDLE_DETECT:
2031                 if (ap->ability_match != 0 &&
2032                     ap->rxconfig == 0) {
2033                         ap->state = ANEG_STATE_AN_ENABLE;
2034                         break;
2035                 }
2036                 delta = ap->cur_time - ap->link_time;
2037                 if (delta > ANEG_STATE_SETTLE_TIME) {
2038                         /* XXX another gem from the Broadcom driver :( */
2039                         ap->state = ANEG_STATE_LINK_OK;
2040                 }
2041                 break;
2042
2043         case ANEG_STATE_LINK_OK:
2044                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2045                 ret = ANEG_DONE;
2046                 break;
2047
2048         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2049                 /* ??? unimplemented */
2050                 break;
2051
2052         case ANEG_STATE_NEXT_PAGE_WAIT:
2053                 /* ??? unimplemented */
2054                 break;
2055
2056         default:
2057                 ret = ANEG_FAILED;
2058                 break;
2059         }
2060
2061         return ret;
2062 }
2063
2064 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2065 {
2066         int res = 0;
2067         struct tg3_fiber_aneginfo aninfo;
2068         int status = ANEG_FAILED;
2069         unsigned int tick;
2070         u32 tmp;
2071
2072         tw32_f(MAC_TX_AUTO_NEG, 0);
2073
2074         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2075         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2076         udelay(40);
2077
2078         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2079         udelay(40);
2080
2081         memset(&aninfo, 0, sizeof(aninfo));
2082         aninfo.flags |= MR_AN_ENABLE;
2083         aninfo.state = ANEG_STATE_UNKNOWN;
2084         aninfo.cur_time = 0;
2085         tick = 0;
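             /* Step the autoneg state machine about once per microsecond,
              * for up to ~195 ms, until it reports done or failed.
              */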
2086         while (++tick < 195000) {
2087                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2088                 if (status == ANEG_DONE || status == ANEG_FAILED)
2089                         break;
2090
2091                 udelay(1);
2092         }
2093
2094         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2095         tw32_f(MAC_MODE, tp->mac_mode);
2096         udelay(40);
2097
2098         *flags = aninfo.flags;
2099
2100         if (status == ANEG_DONE &&
2101             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2102                              MR_LP_ADV_FULL_DUPLEX)))
2103                 res = 1;
2104
2105         return res;
2106 }
2107
2108 static void tg3_init_bcm8002(struct tg3 *tp)
2109 {
2110         u32 mac_status = tr32(MAC_STATUS);
2111         int i;
2112
2113         /* Reset when initializing for the first time or when we have a link. */
2114         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2115             !(mac_status & MAC_STATUS_PCS_SYNCED))
2116                 return;
2117
2118         /* Set PLL lock range. */
2119         tg3_writephy(tp, 0x16, 0x8007);
2120
2121         /* SW reset */
2122         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2123
2124         /* Wait for reset to complete. */
2125         /* XXX schedule_timeout() ... */
2126         for (i = 0; i < 500; i++)
2127                 udelay(10);
2128
2129         /* Config mode; select PMA/Ch 1 regs. */
2130         tg3_writephy(tp, 0x10, 0x8411);
2131
2132         /* Enable auto-lock and comdet, select txclk for tx. */
2133         tg3_writephy(tp, 0x11, 0x0a10);
2134
2135         tg3_writephy(tp, 0x18, 0x00a0);
2136         tg3_writephy(tp, 0x16, 0x41ff);
2137
2138         /* Assert and deassert POR. */
2139         tg3_writephy(tp, 0x13, 0x0400);
2140         udelay(40);
2141         tg3_writephy(tp, 0x13, 0x0000);
2142
2143         tg3_writephy(tp, 0x11, 0x0a50);
2144         udelay(40);
2145         tg3_writephy(tp, 0x11, 0x0a10);
2146
2147         /* Wait for signal to stabilize */
2148         /* XXX schedule_timeout() ... */
2149         for (i = 0; i < 15000; i++)
2150                 udelay(10);
2151
2152         /* Deselect the channel register so we can read the PHYID
2153          * later.
2154          */
2155         tg3_writephy(tp, 0x10, 0x8011);
2156 }
2157
2158 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2159 {
2160         u32 sg_dig_ctrl, sg_dig_status;
2161         u32 serdes_cfg, expected_sg_dig_ctrl;
2162         int workaround, port_a;
2163         int current_link_up;
2164
2165         serdes_cfg = 0;
2166         expected_sg_dig_ctrl = 0;
2167         workaround = 0;
2168         port_a = 1;
2169         current_link_up = 0;
2170
2171         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2172             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2173                 workaround = 1;
2174                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2175                         port_a = 0;
2176
2177                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2178                 /* preserve bits 20-23 for voltage regulator */
2179                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2180         }
2181
2182         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2183
2184         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2185                 if (sg_dig_ctrl & (1 << 31)) {
2186                         if (workaround) {
2187                                 u32 val = serdes_cfg;
2188
2189                                 if (port_a)
2190                                         val |= 0xc010000;
2191                                 else
2192                                         val |= 0x4010000;
2193                                 tw32_f(MAC_SERDES_CFG, val);
2194                         }
2195                         tw32_f(SG_DIG_CTRL, 0x01388400);
2196                 }
2197                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2198                         tg3_setup_flow_control(tp, 0, 0);
2199                         current_link_up = 1;
2200                 }
2201                 goto out;
2202         }
2203
2204         /* Want auto-negotiation.  */
2205         expected_sg_dig_ctrl = 0x81388400;
2206
2207         /* Pause capability */
2208         expected_sg_dig_ctrl |= (1 << 11);
2209
2210         /* Asymmetric pause */
2211         expected_sg_dig_ctrl |= (1 << 12);
2212
2213         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2214                 if (workaround)
2215                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2216                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2217                 udelay(5);
2218                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2219
2220                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2221         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2222                                  MAC_STATUS_SIGNAL_DET)) {
2223                 int i;
2224
2225                 /* Give time to negotiate (~200ms) */
2226                 for (i = 0; i < 40000; i++) {
2227                         sg_dig_status = tr32(SG_DIG_STATUS);
2228                         if (sg_dig_status & (0x3))
2229                                 break;
2230                         udelay(5);
2231                 }
2232                 mac_status = tr32(MAC_STATUS);
2233
2234                 if ((sg_dig_status & (1 << 1)) &&
2235                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2236                         u32 local_adv, remote_adv;
2237
2238                         local_adv = ADVERTISE_PAUSE_CAP;
2239                         remote_adv = 0;
2240                         if (sg_dig_status & (1 << 19))
2241                                 remote_adv |= LPA_PAUSE_CAP;
2242                         if (sg_dig_status & (1 << 20))
2243                                 remote_adv |= LPA_PAUSE_ASYM;
2244
2245                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2246                         current_link_up = 1;
2247                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2248                 } else if (!(sg_dig_status & (1 << 1))) {
2249                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2250                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2251                         else {
2252                                 if (workaround) {
2253                                         u32 val = serdes_cfg;
2254
2255                                         if (port_a)
2256                                                 val |= 0xc010000;
2257                                         else
2258                                                 val |= 0x4010000;
2259
2260                                         tw32_f(MAC_SERDES_CFG, val);
2261                                 }
2262
2263                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2264                                 udelay(40);
2265
2266                                 /* Link parallel detection - link is up
2267                                  * only if we have PCS_SYNC and not
2268                                  * receiving config code words. */
2269                                 mac_status = tr32(MAC_STATUS);
2270                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2271                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2272                                         tg3_setup_flow_control(tp, 0, 0);
2273                                         current_link_up = 1;
2274                                 }
2275                         }
2276                 }
2277         }
2278
2279 out:
2280         return current_link_up;
2281 }
2282
2283 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2284 {
2285         int current_link_up = 0;
2286
2287         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2288                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2289                 goto out;
2290         }
2291
2292         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2293                 u32 flags;
2294                 int i;
2295
2296                 if (fiber_autoneg(tp, &flags)) {
2297                         u32 local_adv, remote_adv;
2298
2299                         local_adv = ADVERTISE_PAUSE_CAP;
2300                         remote_adv = 0;
2301                         if (flags & MR_LP_ADV_SYM_PAUSE)
2302                                 remote_adv |= LPA_PAUSE_CAP;
2303                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2304                                 remote_adv |= LPA_PAUSE_ASYM;
2305
2306                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2307
2308                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2309                         current_link_up = 1;
2310                 }
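                     /* Clear the latched sync/config-changed bits and wait
                      * for them to stay clear before re-sampling MAC_STATUS.
                      */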
2311                 for (i = 0; i < 30; i++) {
2312                         udelay(20);
2313                         tw32_f(MAC_STATUS,
2314                                (MAC_STATUS_SYNC_CHANGED |
2315                                 MAC_STATUS_CFG_CHANGED));
2316                         udelay(40);
2317                         if ((tr32(MAC_STATUS) &
2318                              (MAC_STATUS_SYNC_CHANGED |
2319                               MAC_STATUS_CFG_CHANGED)) == 0)
2320                                 break;
2321                 }
2322
2323                 mac_status = tr32(MAC_STATUS);
2324                 if (current_link_up == 0 &&
2325                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2326                     !(mac_status & MAC_STATUS_RCVD_CFG))
2327                         current_link_up = 1;
2328         } else {
2329                 /* Forcing 1000FD link up. */
2330                 current_link_up = 1;
2331                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2332
2333                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2334                 udelay(40);
2335         }
2336
2337 out:
2338         return current_link_up;
2339 }
2340
2341 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2342 {
2343         u32 orig_pause_cfg;
2344         u16 orig_active_speed;
2345         u8 orig_active_duplex;
2346         u32 mac_status;
2347         int current_link_up;
2348         int i;
2349
2350         orig_pause_cfg =
2351                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2352                                   TG3_FLAG_TX_PAUSE));
2353         orig_active_speed = tp->link_config.active_speed;
2354         orig_active_duplex = tp->link_config.active_duplex;
2355
2356         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2357             netif_carrier_ok(tp->dev) &&
2358             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2359                 mac_status = tr32(MAC_STATUS);
2360                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2361                                MAC_STATUS_SIGNAL_DET |
2362                                MAC_STATUS_CFG_CHANGED |
2363                                MAC_STATUS_RCVD_CFG);
2364                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2365                                    MAC_STATUS_SIGNAL_DET)) {
2366                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2367                                             MAC_STATUS_CFG_CHANGED));
2368                         return 0;
2369                 }
2370         }
2371
2372         tw32_f(MAC_TX_AUTO_NEG, 0);
2373
2374         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2375         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2376         tw32_f(MAC_MODE, tp->mac_mode);
2377         udelay(40);
2378
2379         if (tp->phy_id == PHY_ID_BCM8002)
2380                 tg3_init_bcm8002(tp);
2381
2382         /* Enable link change event even when serdes polling.  */
2383         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2384         udelay(40);
2385
2386         current_link_up = 0;
2387         mac_status = tr32(MAC_STATUS);
2388
2389         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2390                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2391         else
2392                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2393
2394         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2395         tw32_f(MAC_MODE, tp->mac_mode);
2396         udelay(40);
2397
2398         tp->hw_status->status =
2399                 (SD_STATUS_UPDATED |
2400                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2401
2402         for (i = 0; i < 100; i++) {
2403                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2404                                     MAC_STATUS_CFG_CHANGED));
2405                 udelay(5);
2406                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2407                                          MAC_STATUS_CFG_CHANGED)) == 0)
2408                         break;
2409         }
2410
2411         mac_status = tr32(MAC_STATUS);
2412         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2413                 current_link_up = 0;
2414                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2415                         tw32_f(MAC_MODE, (tp->mac_mode |
2416                                           MAC_MODE_SEND_CONFIGS));
2417                         udelay(1);
2418                         tw32_f(MAC_MODE, tp->mac_mode);
2419                 }
2420         }
2421
2422         if (current_link_up == 1) {
2423                 tp->link_config.active_speed = SPEED_1000;
2424                 tp->link_config.active_duplex = DUPLEX_FULL;
2425                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2426                                     LED_CTRL_LNKLED_OVERRIDE |
2427                                     LED_CTRL_1000MBPS_ON));
2428         } else {
2429                 tp->link_config.active_speed = SPEED_INVALID;
2430                 tp->link_config.active_duplex = DUPLEX_INVALID;
2431                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2432                                     LED_CTRL_LNKLED_OVERRIDE |
2433                                     LED_CTRL_TRAFFIC_OVERRIDE));
2434         }
2435
2436         if (current_link_up != netif_carrier_ok(tp->dev)) {
2437                 if (current_link_up)
2438                         netif_carrier_on(tp->dev);
2439                 else
2440                         netif_carrier_off(tp->dev);
2441                 tg3_link_report(tp);
2442         } else {
2443                 u32 now_pause_cfg =
2444                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2445                                          TG3_FLAG_TX_PAUSE);
2446                 if (orig_pause_cfg != now_pause_cfg ||
2447                     orig_active_speed != tp->link_config.active_speed ||
2448                     orig_active_duplex != tp->link_config.active_duplex)
2449                         tg3_link_report(tp);
2450         }
2451
2452         return 0;
2453 }
2454
2455 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2456 {
2457         int err;
2458
2459         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2460                 err = tg3_setup_fiber_phy(tp, force_reset);
2461         } else {
2462                 err = tg3_setup_copper_phy(tp, force_reset);
2463         }
2464
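             /* Program IPG and slot time: 1000 Mb/s half duplex gets an
              * extended slot time (0xff), all other modes use 32.
              */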
2465         if (tp->link_config.active_speed == SPEED_1000 &&
2466             tp->link_config.active_duplex == DUPLEX_HALF)
2467                 tw32(MAC_TX_LENGTHS,
2468                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2469                       (6 << TX_LENGTHS_IPG_SHIFT) |
2470                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2471         else
2472                 tw32(MAC_TX_LENGTHS,
2473                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2474                       (6 << TX_LENGTHS_IPG_SHIFT) |
2475                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2476
2477         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2478                 if (netif_carrier_ok(tp->dev)) {
2479                         tw32(HOSTCC_STAT_COAL_TICKS,
2480                              DEFAULT_STAT_COAL_TICKS);
2481                 } else {
2482                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2483                 }
2484         }
2485
2486         return err;
2487 }
2488
2489 /* Tigon3 never reports partial packet sends.  So we do not
2490  * need special logic to handle SKBs that have not had all
2491  * of their frags sent yet, like SunGEM does.
2492  */
2493 static void tg3_tx(struct tg3 *tp)
2494 {
2495         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2496         u32 sw_idx = tp->tx_cons;
2497
2498         while (sw_idx != hw_idx) {
2499                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2500                 struct sk_buff *skb = ri->skb;
2501                 int i;
2502
2503                 if (unlikely(skb == NULL))
2504                         BUG();
2505
2506                 pci_unmap_single(tp->pdev,
2507                                  pci_unmap_addr(ri, mapping),
2508                                  skb_headlen(skb),
2509                                  PCI_DMA_TODEVICE);
2510
2511                 ri->skb = NULL;
2512
2513                 sw_idx = NEXT_TX(sw_idx);
2514
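                     /* The first descriptor covered the linear part of the
                      * skb; unmap one additional descriptor per fragment.
                      */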
2515                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2516                         if (unlikely(sw_idx == hw_idx))
2517                                 BUG();
2518
2519                         ri = &tp->tx_buffers[sw_idx];
2520                         if (unlikely(ri->skb != NULL))
2521                                 BUG();
2522
2523                         pci_unmap_page(tp->pdev,
2524                                        pci_unmap_addr(ri, mapping),
2525                                        skb_shinfo(skb)->frags[i].size,
2526                                        PCI_DMA_TODEVICE);
2527
2528                         sw_idx = NEXT_TX(sw_idx);
2529                 }
2530
2531                 dev_kfree_skb_irq(skb);
2532         }
2533
2534         tp->tx_cons = sw_idx;
2535
2536         if (netif_queue_stopped(tp->dev) &&
2537             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2538                 netif_wake_queue(tp->dev);
2539 }
2540
2541 /* Returns size of skb allocated or < 0 on error.
2542  *
2543  * We only need to fill in the address because the other members
2544  * of the RX descriptor are invariant, see tg3_init_rings.
2545  *
2546  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2547  * posting buffers we only dirty the first cache line of the RX
2548  * descriptor (containing the address).  Whereas for the RX status
2549  * buffers the cpu only reads the last cacheline of the RX descriptor
2550  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2551  */
2552 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2553                             int src_idx, u32 dest_idx_unmasked)
2554 {
2555         struct tg3_rx_buffer_desc *desc;
2556         struct ring_info *map, *src_map;
2557         struct sk_buff *skb;
2558         dma_addr_t mapping;
2559         int skb_size, dest_idx;
2560
2561         src_map = NULL;
2562         switch (opaque_key) {
2563         case RXD_OPAQUE_RING_STD:
2564                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2565                 desc = &tp->rx_std[dest_idx];
2566                 map = &tp->rx_std_buffers[dest_idx];
2567                 if (src_idx >= 0)
2568                         src_map = &tp->rx_std_buffers[src_idx];
2569                 skb_size = RX_PKT_BUF_SZ;
2570                 break;
2571
2572         case RXD_OPAQUE_RING_JUMBO:
2573                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2574                 desc = &tp->rx_jumbo[dest_idx];
2575                 map = &tp->rx_jumbo_buffers[dest_idx];
2576                 if (src_idx >= 0)
2577                         src_map = &tp->rx_jumbo_buffers[src_idx];
2578                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2579                 break;
2580
2581         default:
2582                 return -EINVAL;
2583         }
2584
2585         /* Do not overwrite any of the map or rp information
2586          * until we are sure we can commit to a new buffer.
2587          *
2588          * Callers depend upon this behavior and assume that
2589          * we leave everything unchanged if we fail.
2590          */
2591         skb = dev_alloc_skb(skb_size);
2592         if (skb == NULL)
2593                 return -ENOMEM;
2594
2595         skb->dev = tp->dev;
2596         skb_reserve(skb, tp->rx_offset);
2597
2598         mapping = pci_map_single(tp->pdev, skb->data,
2599                                  skb_size - tp->rx_offset,
2600                                  PCI_DMA_FROMDEVICE);
2601
2602         map->skb = skb;
2603         pci_unmap_addr_set(map, mapping, mapping);
2604
2605         if (src_map != NULL)
2606                 src_map->skb = NULL;
2607
2608         desc->addr_hi = ((u64)mapping >> 32);
2609         desc->addr_lo = ((u64)mapping & 0xffffffff);
2610
2611         return skb_size;
2612 }
2613
2614 /* We only need to move over in the address because the other
2615  * members of the RX descriptor are invariant.  See notes above
2616  * tg3_alloc_rx_skb for full details.
2617  */
2618 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2619                            int src_idx, u32 dest_idx_unmasked)
2620 {
2621         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2622         struct ring_info *src_map, *dest_map;
2623         int dest_idx;
2624
2625         switch (opaque_key) {
2626         case RXD_OPAQUE_RING_STD:
2627                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2628                 dest_desc = &tp->rx_std[dest_idx];
2629                 dest_map = &tp->rx_std_buffers[dest_idx];
2630                 src_desc = &tp->rx_std[src_idx];
2631                 src_map = &tp->rx_std_buffers[src_idx];
2632                 break;
2633
2634         case RXD_OPAQUE_RING_JUMBO:
2635                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2636                 dest_desc = &tp->rx_jumbo[dest_idx];
2637                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2638                 src_desc = &tp->rx_jumbo[src_idx];
2639                 src_map = &tp->rx_jumbo_buffers[src_idx];
2640                 break;
2641
2642         default:
2643                 return;
2644         }
2645
2646         dest_map->skb = src_map->skb;
2647         pci_unmap_addr_set(dest_map, mapping,
2648                            pci_unmap_addr(src_map, mapping));
2649         dest_desc->addr_hi = src_desc->addr_hi;
2650         dest_desc->addr_lo = src_desc->addr_lo;
2651
2652         src_map->skb = NULL;
2653 }
2654
2655 #if TG3_VLAN_TAG_USED
2656 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2657 {
2658         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2659 }
2660 #endif
2661
2662 /* The RX ring scheme is composed of multiple rings which post fresh
2663  * buffers to the chip, and one special ring the chip uses to report
2664  * status back to the host.
2665  *
2666  * The special ring reports the status of received packets to the
2667  * host.  The chip does not write into the original descriptor the
2668  * RX buffer was obtained from.  The chip simply takes the original
2669  * descriptor as provided by the host, updates the status and length
2670  * field, then writes this into the next status ring entry.
2671  *
2672  * Each ring the host uses to post buffers to the chip is described
2673  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2674  * it is first placed into the on-chip ram.  When the packet's length
2675  * is known, it walks down the TG3_BDINFO entries to select the ring.
2676  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2677  * which is within the range of the new packet's length is chosen.
2678  *
2679  * The "separate ring for rx status" scheme may sound queer, but it makes
2680  * sense from a cache coherency perspective.  If only the host writes
2681  * to the buffer post rings, and only the chip writes to the rx status
2682  * rings, then cache lines never move beyond shared-modified state.
2683  * If both the host and chip were to write into the same ring, cache line
2684  * eviction could occur since both entities want it in an exclusive state.
2685  */
2686 static int tg3_rx(struct tg3 *tp, int budget)
2687 {
2688         u32 work_mask;
2689         u32 sw_idx = tp->rx_rcb_ptr;
2690         u16 hw_idx;
2691         int received;
2692
2693         hw_idx = tp->hw_status->idx[0].rx_producer;
2694         /*
2695          * We need to order the read of hw_idx and the read of
2696          * the opaque cookie.
2697          */
2698         rmb();
2699         work_mask = 0;
2700         received = 0;
2701         while (sw_idx != hw_idx && budget > 0) {
2702                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2703                 unsigned int len;
2704                 struct sk_buff *skb;
2705                 dma_addr_t dma_addr;
2706                 u32 opaque_key, desc_idx, *post_ptr;
2707
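                     /* The opaque cookie, set when the buffer was posted,
                      * identifies the producer ring (std or jumbo) and the
                      * index of the buffer within that ring.
                      */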
2708                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2709                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2710                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2711                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2712                                                   mapping);
2713                         skb = tp->rx_std_buffers[desc_idx].skb;
2714                         post_ptr = &tp->rx_std_ptr;
2715                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2716                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2717                                                   mapping);
2718                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2719                         post_ptr = &tp->rx_jumbo_ptr;
2720                 }
2721                 else {
2722                         goto next_pkt_nopost;
2723                 }
2724
2725                 work_mask |= opaque_key;
2726
2727                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2728                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2729                 drop_it:
2730                         tg3_recycle_rx(tp, opaque_key,
2731                                        desc_idx, *post_ptr);
2732                 drop_it_no_recycle:
2733                         /* Other statistics are kept track of by the card. */
2734                         tp->net_stats.rx_dropped++;
2735                         goto next_pkt;
2736                 }
2737
2738                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2739
2740                 /* rx_offset != 2 iff this is a 5701 card running
2741                  * in PCI-X mode [see tg3_get_invariants()]
2742                  */
2743                 if (len > RX_COPY_THRESHOLD &&
2744                     tp->rx_offset == 2) {
2745                         int skb_size;
2746
2747                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2748                                                     desc_idx, *post_ptr);
2749                         if (skb_size < 0)
2750                                 goto drop_it;
2751
2752                         pci_unmap_single(tp->pdev, dma_addr,
2753                                          skb_size - tp->rx_offset,
2754                                          PCI_DMA_FROMDEVICE);
2755
2756                         skb_put(skb, len);
2757                 } else {
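                             /* Copy path: recycle the original buffer back to
                              * its producer ring and copy the data into a
                              * freshly allocated skb (2 bytes reserved so the
                              * IP header ends up aligned).
                              */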
2758                         struct sk_buff *copy_skb;
2759
2760                         tg3_recycle_rx(tp, opaque_key,
2761                                        desc_idx, *post_ptr);
2762
2763                         copy_skb = dev_alloc_skb(len + 2);
2764                         if (copy_skb == NULL)
2765                                 goto drop_it_no_recycle;
2766
2767                         copy_skb->dev = tp->dev;
2768                         skb_reserve(copy_skb, 2);
2769                         skb_put(copy_skb, len);
2770                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2771                         memcpy(copy_skb->data, skb->data, len);
2772                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2773
2774                         /* We'll reuse the original ring buffer. */
2775                         skb = copy_skb;
2776                 }
2777
2778                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2779                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2780                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2781                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2782                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2783                 else
2784                         skb->ip_summed = CHECKSUM_NONE;
2785
2786                 skb->protocol = eth_type_trans(skb, tp->dev);
2787 #if TG3_VLAN_TAG_USED
2788                 if (tp->vlgrp != NULL &&
2789                     desc->type_flags & RXD_FLAG_VLAN) {
2790                         tg3_vlan_rx(tp, skb,
2791                                     desc->err_vlan & RXD_VLAN_MASK);
2792                 } else
2793 #endif
2794                         netif_receive_skb(skb);
2795
2796                 tp->dev->last_rx = jiffies;
2797                 received++;
2798                 budget--;
2799
2800 next_pkt:
2801                 (*post_ptr)++;
2802 next_pkt_nopost:
2803                 sw_idx++;
2804                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2805
2806                 /* Refresh hw_idx to see if there is new work */
2807                 if (sw_idx == hw_idx) {
2808                         hw_idx = tp->hw_status->idx[0].rx_producer;
2809                         rmb();
2810                 }
2811         }
2812
2813         /* ACK the status ring. */
2814         tp->rx_rcb_ptr = sw_idx;
2815         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2816
2817         /* Refill RX ring(s). */
2818         if (work_mask & RXD_OPAQUE_RING_STD) {
2819                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2820                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2821                              sw_idx);
2822         }
2823         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2824                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2825                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2826                              sw_idx);
2827         }
2828         mmiowb();
2829
2830         return received;
2831 }
2832
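/*
 * Editorial note on tg3_rx() above: frames longer than
 * RX_COPY_THRESHOLD (with the usual 2-byte rx_offset) are handed up
 * in their original DMA buffer and a freshly allocated skb is posted
 * to the ring in its place; shorter frames are memcpy()'d into a new
 * skb so the original ring buffer can simply be recycled.
 */
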
2833 static int tg3_poll(struct net_device *netdev, int *budget)
2834 {
2835         struct tg3 *tp = netdev_priv(netdev);
2836         struct tg3_hw_status *sblk = tp->hw_status;
2837         unsigned long flags;
2838         int done;
2839
2840         spin_lock_irqsave(&tp->lock, flags);
2841
2842         /* handle link change and other phy events */
2843         if (!(tp->tg3_flags &
2844               (TG3_FLAG_USE_LINKCHG_REG |
2845                TG3_FLAG_POLL_SERDES))) {
2846                 if (sblk->status & SD_STATUS_LINK_CHG) {
2847                         sblk->status = SD_STATUS_UPDATED |
2848                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2849                         tg3_setup_phy(tp, 0);
2850                 }
2851         }
2852
2853         /* run TX completion thread */
2854         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2855                 spin_lock(&tp->tx_lock);
2856                 tg3_tx(tp);
2857                 spin_unlock(&tp->tx_lock);
2858         }
2859
2860         spin_unlock_irqrestore(&tp->lock, flags);
2861
2862         /* run RX thread, within the bounds set by NAPI.
2863          * All RX "locking" is done by ensuring outside
2864          * code synchronizes with dev->poll()
2865          */
2866         done = 1;
2867         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2868                 int orig_budget = *budget;
2869                 int work_done;
2870
2871                 if (orig_budget > netdev->quota)
2872                         orig_budget = netdev->quota;
2873
2874                 work_done = tg3_rx(tp, orig_budget);
2875
2876                 *budget -= work_done;
2877                 netdev->quota -= work_done;
2878
2879                 if (work_done >= orig_budget)
2880                         done = 0;
2881         }
2882
2883         /* if no more work, tell net stack and NIC we're done */
2884         if (done) {
2885                 spin_lock_irqsave(&tp->lock, flags);
2886                 __netif_rx_complete(netdev);
2887                 tg3_restart_ints(tp);
2888                 spin_unlock_irqrestore(&tp->lock, flags);
2889         }
2890
2891         return (done ? 0 : 1);
2892 }
2893
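/*
 * Editorial note on tg3_poll() above: with the dev->poll() NAPI
 * interface used here, returning 0 means all work fit within *budget
 * and interrupts were re-armed via __netif_rx_complete() and
 * tg3_restart_ints(); returning 1 asks the network core to call
 * poll() again because the budget (or device quota) was exhausted.
 */
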
2894 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2895 {
2896         struct tg3_hw_status *sblk = tp->hw_status;
2897         unsigned int work_exists = 0;
2898
2899         /* check for phy events */
2900         if (!(tp->tg3_flags &
2901               (TG3_FLAG_USE_LINKCHG_REG |
2902                TG3_FLAG_POLL_SERDES))) {
2903                 if (sblk->status & SD_STATUS_LINK_CHG)
2904                         work_exists = 1;
2905         }
2906         /* check for RX/TX work to do */
2907         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2908             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2909                 work_exists = 1;
2910
2911         return work_exists;
2912 }
2913
2914 /* MSI ISR - No need to check for interrupt sharing and no need to
2915  * flush status block and interrupt mailbox. PCI ordering rules
2916  * guarantee that MSI will arrive after the status block.
2917  */
2918 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2919 {
2920         struct net_device *dev = dev_id;
2921         struct tg3 *tp = netdev_priv(dev);
2922         struct tg3_hw_status *sblk = tp->hw_status;
2923         unsigned long flags;
2924
2925         spin_lock_irqsave(&tp->lock, flags);
2926
2927         /*
2928          * writing any value to intr-mbox-0 clears PCI INTA# and
2929          * chip-internal interrupt pending events.
2930          * writing non-zero to intr-mbox-0 additionally tells the
2931          * NIC to stop sending us irqs, engaging "in-intr-handler"
2932          * event coalescing.
2933          */
2934         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
2935         sblk->status &= ~SD_STATUS_UPDATED;
2936
2937         if (likely(tg3_has_work(dev, tp)))
2938                 netif_rx_schedule(dev);         /* schedule NAPI poll */
2939         else {
2940                 /* no work, re-enable interrupts
2941                  */
2942                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2943                              0x00000000);
2944         }
2945
2946         spin_unlock_irqrestore(&tp->lock, flags);
2947
2948         return IRQ_RETVAL(1);
2949 }
2950
2951 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2952 {
2953         struct net_device *dev = dev_id;
2954         struct tg3 *tp = netdev_priv(dev);
2955         struct tg3_hw_status *sblk = tp->hw_status;
2956         unsigned long flags;
2957         unsigned int handled = 1;
2958
2959         spin_lock_irqsave(&tp->lock, flags);
2960
2961         /* In INTx mode, it is possible for the interrupt to arrive at
2962          * the CPU before the status block write that preceded it has
2963          * reached host memory.  Reading the PCI State register confirms
2964          * whether the interrupt is ours and flushes the status block.
2965          */
2966         if ((sblk->status & SD_STATUS_UPDATED) ||
2967             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2968                 /*
2969                  * writing any value to intr-mbox-0 clears PCI INTA# and
2970                  * chip-internal interrupt pending events.
2971                  * writing non-zero to intr-mbox-0 additionally tells the
2972                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2973                  * event coalescing.
2974                  */
2975                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2976                              0x00000001);
2977                 /*
2978                  * Flush PCI write.  This also guarantees that our
2979                  * status block has been flushed to host memory.
2980                  */
2981                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2982                 sblk->status &= ~SD_STATUS_UPDATED;
2983
2984                 if (likely(tg3_has_work(dev, tp)))
2985                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2986                 else {
2987                         /* no work, shared interrupt perhaps?  re-enable
2988                          * interrupts, and flush that PCI write
2989                          */
2990                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2991                                 0x00000000);
2992                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2993                 }
2994         } else {        /* shared interrupt */
2995                 handled = 0;
2996         }
2997
2998         spin_unlock_irqrestore(&tp->lock, flags);
2999
3000         return IRQ_RETVAL(handled);
3001 }
3002
3003 /* ISR for interrupt test */
3004 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3005                 struct pt_regs *regs)
3006 {
3007         struct net_device *dev = dev_id;
3008         struct tg3 *tp = netdev_priv(dev);
3009         struct tg3_hw_status *sblk = tp->hw_status;
3010
3011         if (sblk->status & SD_STATUS_UPDATED) {
3012                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3013                              0x00000001);
3014                 return IRQ_RETVAL(1);
3015         }
3016         return IRQ_RETVAL(0);
3017 }
3018
3019 static int tg3_init_hw(struct tg3 *);
3020 static int tg3_halt(struct tg3 *);
3021
3022 #ifdef CONFIG_NET_POLL_CONTROLLER
3023 static void tg3_poll_controller(struct net_device *dev)
3024 {
3025         struct tg3 *tp = netdev_priv(dev);
3026
3027         tg3_interrupt(tp->pdev->irq, dev, NULL);
3028 }
3029 #endif
3030
3031 static void tg3_reset_task(void *_data)
3032 {
3033         struct tg3 *tp = _data;
3034         unsigned int restart_timer;
3035
3036         tg3_netif_stop(tp);
3037
3038         spin_lock_irq(&tp->lock);
3039         spin_lock(&tp->tx_lock);
3040
3041         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3042         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3043
3044         tg3_halt(tp);
3045         tg3_init_hw(tp);
3046
3047         tg3_netif_start(tp);
3048
3049         spin_unlock(&tp->tx_lock);
3050         spin_unlock_irq(&tp->lock);
3051
3052         if (restart_timer)
3053                 mod_timer(&tp->timer, jiffies + 1);
3054 }
3055
3056 static void tg3_tx_timeout(struct net_device *dev)
3057 {
3058         struct tg3 *tp = netdev_priv(dev);
3059
3060         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3061                dev->name);
3062
3063         schedule_work(&tp->reset_task);
3064 }
3065
3066 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3067
3068 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3069                                        u32 guilty_entry, int guilty_len,
3070                                        u32 last_plus_one, u32 *start, u32 mss)
3071 {
3072         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3073         dma_addr_t new_addr;
3074         u32 entry = *start;
3075         int i;
3076
3077         if (!new_skb) {
3078                 dev_kfree_skb(skb);
3079                 return -1;
3080         }
3081
3082         /* New SKB is guaranteed to be linear. */
3083         entry = *start;
3084         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3085                                   PCI_DMA_TODEVICE);
3086         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3087                     (skb->ip_summed == CHECKSUM_HW) ?
3088                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3089         *start = NEXT_TX(entry);
3090
3091         /* Now clean up the sw ring entries. */
3092         i = 0;
3093         while (entry != last_plus_one) {
3094                 int len;
3095
3096                 if (i == 0)
3097                         len = skb_headlen(skb);
3098                 else
3099                         len = skb_shinfo(skb)->frags[i-1].size;
3100                 pci_unmap_single(tp->pdev,
3101                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3102                                  len, PCI_DMA_TODEVICE);
3103                 if (i == 0) {
3104                         tp->tx_buffers[entry].skb = new_skb;
3105                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3106                 } else {
3107                         tp->tx_buffers[entry].skb = NULL;
3108                 }
3109                 entry = NEXT_TX(entry);
3110                 i++;
3111         }
3112
3113         dev_kfree_skb(skb);
3114
3115         return 0;
3116 }
3117
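/*
 * Editorial note: the workaround above copies the offending skb into
 * a single linear buffer (skb_copy()), maps that buffer and writes one
 * replacement TX descriptor for it, then unmaps and cleans up the sw
 * ring entries that had been set up for the original skb and its
 * fragments.
 */
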
3118 static void tg3_set_txd(struct tg3 *tp, int entry,
3119                         dma_addr_t mapping, int len, u32 flags,
3120                         u32 mss_and_is_end)
3121 {
3122         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3123         int is_end = (mss_and_is_end & 0x1);
3124         u32 mss = (mss_and_is_end >> 1);
3125         u32 vlan_tag = 0;
3126
3127         if (is_end)
3128                 flags |= TXD_FLAG_END;
3129         if (flags & TXD_FLAG_VLAN) {
3130                 vlan_tag = flags >> 16;
3131                 flags &= 0xffff;
3132         }
3133         vlan_tag |= (mss << TXD_MSS_SHIFT);
3134
3135         txd->addr_hi = ((u64) mapping >> 32);
3136         txd->addr_lo = ((u64) mapping & 0xffffffff);
3137         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3138         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3139 }
3140
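/*
 * Editorial note: the mss_and_is_end argument of tg3_set_txd() above
 * packs the "last descriptor" flag into bit 0 and the TSO MSS into the
 * upper bits; callers therefore pass values such as
 * (i == last) | (mss << 1).
 */
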
3141 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3142 {
3143         u32 base = (u32) mapping & 0xffffffff;
3144
3145         return ((base > 0xffffdcc0) &&
3146                 (base + len + 8 < base));
3147 }
3148
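/*
 * Editorial sketch (not part of the driver): a worked example of the
 * boundary test above, with a hypothetical mapping value.
 */
static inline void tg3_4g_overflow_example(void)
{
        /* Low 32 bits of a hypothetical DMA mapping, 64 bytes below 4GB. */
        dma_addr_t mapping = 0xffffffc0;
        int len = 256;

        /* base = 0xffffffc0 > 0xffffdcc0, and base + len + 8 wraps past
         * 0xffffffff, so the test fires and tg3_start_xmit() below
         * reroutes the frame through tigon3_4gb_hwbug_workaround().
         */
        BUG_ON(!tg3_4g_overflow_test(mapping, len));
}
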
3149 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3150 {
3151         struct tg3 *tp = netdev_priv(dev);
3152         dma_addr_t mapping;
3153         unsigned int i;
3154         u32 len, entry, base_flags, mss;
3155         int would_hit_hwbug;
3156         unsigned long flags;
3157
3158         len = skb_headlen(skb);
3159
3160         /* No BH disabling for tx_lock here.  We are running in BH disabled
3161          * context and TX reclaim runs via tp->poll inside of a software
3162          * interrupt.  Rejoice!
3163          *
3164          * Actually, things are not so simple.  If we are to take a hw
3165          * IRQ here, we can deadlock, consider:
3166          *
3167          *       CPU1           CPU2
3168          *   tg3_start_xmit
3169          *   take tp->tx_lock
3170          *                      tg3_timer
3171          *                      take tp->lock
3172          *   tg3_interrupt
3173          *   spin on tp->lock
3174          *                      spin on tp->tx_lock
3175          *
3176          * So we really do need to disable interrupts when taking
3177          * tx_lock here.
3178          */
3179         local_irq_save(flags);
3180         if (!spin_trylock(&tp->tx_lock)) {
3181                 local_irq_restore(flags);
3182                 return NETDEV_TX_LOCKED;
3183         }
3184
3185         /* This is a hard error, log it. */
3186         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3187                 netif_stop_queue(dev);
3188                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3189                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3190                        dev->name);
3191                 return NETDEV_TX_BUSY;
3192         }
3193
3194         entry = tp->tx_prod;
3195         base_flags = 0;
3196         if (skb->ip_summed == CHECKSUM_HW)
3197                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3198 #if TG3_TSO_SUPPORT != 0
3199         mss = 0;
3200         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3201             (mss = skb_shinfo(skb)->tso_size) != 0) {
3202                 int tcp_opt_len, ip_tcp_len;
3203
3204                 if (skb_header_cloned(skb) &&
3205                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3206                         dev_kfree_skb(skb);
3207                         goto out_unlock;
3208                 }
3209
3210                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3211                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3212
3213                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3214                                TXD_FLAG_CPU_POST_DMA);
3215
3216                 skb->nh.iph->check = 0;
3217                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3218                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3219                         skb->h.th->check = 0;
3220                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3221                 } else {
3223                         skb->h.th->check =
3224                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3225                                                    skb->nh.iph->daddr,
3226                                                    0, IPPROTO_TCP, 0);
3227                 }
3228
3229                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3230                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3231                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3232                                 int tsflags;
3233
3234                                 tsflags = ((skb->nh.iph->ihl - 5) +
3235                                            (tcp_opt_len >> 2));
3236                                 mss |= (tsflags << 11);
3237                         }
3238                 } else {
3239                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3240                                 int tsflags;
3241
3242                                 tsflags = ((skb->nh.iph->ihl - 5) +
3243                                            (tcp_opt_len >> 2));
3244                                 base_flags |= tsflags << 12;
3245                         }
3246                 }
3247         }
3248 #else
3249         mss = 0;
3250 #endif
3251 #if TG3_VLAN_TAG_USED
3252         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3253                 base_flags |= (TXD_FLAG_VLAN |
3254                                (vlan_tx_tag_get(skb) << 16));
3255 #endif
3256
3257         /* Queue skb data, a.k.a. the main skb fragment. */
3258         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3259
3260         tp->tx_buffers[entry].skb = skb;
3261         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3262
3263         would_hit_hwbug = 0;
3264
3265         if (tg3_4g_overflow_test(mapping, len))
3266                 would_hit_hwbug = entry + 1;
3267
3268         tg3_set_txd(tp, entry, mapping, len, base_flags,
3269                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3270
3271         entry = NEXT_TX(entry);
3272
3273         /* Now loop through additional data fragments, and queue them. */
3274         if (skb_shinfo(skb)->nr_frags > 0) {
3275                 unsigned int i, last;
3276
3277                 last = skb_shinfo(skb)->nr_frags - 1;
3278                 for (i = 0; i <= last; i++) {
3279                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3280
3281                         len = frag->size;
3282                         mapping = pci_map_page(tp->pdev,
3283                                                frag->page,
3284                                                frag->page_offset,
3285                                                len, PCI_DMA_TODEVICE);
3286
3287                         tp->tx_buffers[entry].skb = NULL;
3288                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3289
3290                         if (tg3_4g_overflow_test(mapping, len)) {
3291                                 /* Only one should match. */
3292                                 if (would_hit_hwbug)
3293                                         BUG();
3294                                 would_hit_hwbug = entry + 1;
3295                         }
3296
3297                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3298                                 tg3_set_txd(tp, entry, mapping, len,
3299                                             base_flags, (i == last)|(mss << 1));
3300                         else
3301                                 tg3_set_txd(tp, entry, mapping, len,
3302                                             base_flags, (i == last));
3303
3304                         entry = NEXT_TX(entry);
3305                 }
3306         }
3307
3308         if (would_hit_hwbug) {
3309                 u32 last_plus_one = entry;
3310                 u32 start;
3311                 unsigned int len = 0;
3312
3313                 would_hit_hwbug -= 1;
3314                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3315                 entry &= (TG3_TX_RING_SIZE - 1);
3316                 start = entry;
3317                 i = 0;
3318                 while (entry != last_plus_one) {
3319                         if (i == 0)
3320                                 len = skb_headlen(skb);
3321                         else
3322                                 len = skb_shinfo(skb)->frags[i-1].size;
3323
3324                         if (entry == would_hit_hwbug)
3325                                 break;
3326
3327                         i++;
3328                         entry = NEXT_TX(entry);
3329
3330                 }
3331
3332                 /* If the workaround fails due to memory/mapping
3333                  * failure, silently drop this packet.
3334                  */
3335                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3336                                                 entry, len,
3337                                                 last_plus_one,
3338                                                 &start, mss))
3339                         goto out_unlock;
3340
3341                 entry = start;
3342         }
3343
3344         /* Packets are ready, update Tx producer idx locally and on card. */
3345         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3346
3347         tp->tx_prod = entry;
3348         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3349                 netif_stop_queue(dev);
3350
3351 out_unlock:
3352         mmiowb();
3353         spin_unlock_irqrestore(&tp->tx_lock, flags);
3354
3355         dev->trans_start = jiffies;
3356
3357         return NETDEV_TX_OK;
3358 }
3359
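/*
 * Editorial note on the TSO path in tg3_start_xmit() above: the IP
 * header checksum is cleared and tot_len rewritten to the per-segment
 * length, while the TCP checksum is either zeroed (TG3_FLG2_HW_TSO
 * chips) or seeded with a pseudo-header checksum via
 * csum_tcpudp_magic() so the hardware can complete the checksum of
 * each emitted segment.
 */
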
3360 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3361                                int new_mtu)
3362 {
3363         dev->mtu = new_mtu;
3364
3365         if (new_mtu > ETH_DATA_LEN)
3366                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3367         else
3368                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3369 }
3370
3371 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3372 {
3373         struct tg3 *tp = netdev_priv(dev);
3374
3375         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3376                 return -EINVAL;
3377
3378         if (!netif_running(dev)) {
3379                 /* We'll just catch it later when the
3380                  * device is brought up.
3381                  */
3382                 tg3_set_mtu(dev, tp, new_mtu);
3383                 return 0;
3384         }
3385
3386         tg3_netif_stop(tp);
3387         spin_lock_irq(&tp->lock);
3388         spin_lock(&tp->tx_lock);
3389
3390         tg3_halt(tp);
3391
3392         tg3_set_mtu(dev, tp, new_mtu);
3393
3394         tg3_init_hw(tp);
3395
3396         tg3_netif_start(tp);
3397
3398         spin_unlock(&tp->tx_lock);
3399         spin_unlock_irq(&tp->lock);
3400
3401         return 0;
3402 }
3403
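/*
 * Editorial note: on a running interface an MTU change goes through a
 * full tg3_halt()/tg3_init_hw() cycle above because tg3_set_mtu()
 * selects TG3_FLAG_JUMBO_ENABLE (and hence the jumbo RX ring setup)
 * from the new MTU.
 */
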
3404 /* Free up pending packets in all rx/tx rings.
3405  *
3406  * The chip has been shut down and the driver detached from
3407  * the networking stack, so no interrupts or new tx packets will
3408  * end up in the driver.  tp->{tx,}lock is not held and we are not
3409  * in an interrupt context and thus may sleep.
3410  */
3411 static void tg3_free_rings(struct tg3 *tp)
3412 {
3413         struct ring_info *rxp;
3414         int i;
3415
3416         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3417                 rxp = &tp->rx_std_buffers[i];
3418
3419                 if (rxp->skb == NULL)
3420                         continue;
3421                 pci_unmap_single(tp->pdev,
3422                                  pci_unmap_addr(rxp, mapping),
3423                                  RX_PKT_BUF_SZ - tp->rx_offset,
3424                                  PCI_DMA_FROMDEVICE);
3425                 dev_kfree_skb_any(rxp->skb);
3426                 rxp->skb = NULL;
3427         }
3428
3429         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3430                 rxp = &tp->rx_jumbo_buffers[i];
3431
3432                 if (rxp->skb == NULL)
3433                         continue;
3434                 pci_unmap_single(tp->pdev,
3435                                  pci_unmap_addr(rxp, mapping),
3436                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3437                                  PCI_DMA_FROMDEVICE);
3438                 dev_kfree_skb_any(rxp->skb);
3439                 rxp->skb = NULL;
3440         }
3441
3442         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3443                 struct tx_ring_info *txp;
3444                 struct sk_buff *skb;
3445                 int j;
3446
3447                 txp = &tp->tx_buffers[i];
3448                 skb = txp->skb;
3449
3450                 if (skb == NULL) {
3451                         i++;
3452                         continue;
3453                 }
3454
3455                 pci_unmap_single(tp->pdev,
3456                                  pci_unmap_addr(txp, mapping),
3457                                  skb_headlen(skb),
3458                                  PCI_DMA_TODEVICE);
3459                 txp->skb = NULL;
3460
3461                 i++;
3462
3463                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3464                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3465                         pci_unmap_page(tp->pdev,
3466                                        pci_unmap_addr(txp, mapping),
3467                                        skb_shinfo(skb)->frags[j].size,
3468                                        PCI_DMA_TODEVICE);
3469                         i++;
3470                 }
3471
3472                 dev_kfree_skb_any(skb);
3473         }
3474 }
3475
3476 /* Initialize tx/rx rings for packet processing.
3477  *
3478  * The chip has been shut down and the driver detached from
3479  * the networking stack, so no interrupts or new tx packets will
3480  * end up in the driver.  tp->{tx,}lock are held and thus
3481  * we may not sleep.
3482  */
3483 static void tg3_init_rings(struct tg3 *tp)
3484 {
3485         u32 i;
3486
3487         /* Free up all the SKBs. */
3488         tg3_free_rings(tp);
3489
3490         /* Zero out all descriptors. */
3491         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3492         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3493         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3494         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3495
3496         /* Initialize invariants of the rings; we only set this
3497          * stuff once.  This works because the card does not
3498          * write into the rx buffer posting rings.
3499          */
3500         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3501                 struct tg3_rx_buffer_desc *rxd;
3502
3503                 rxd = &tp->rx_std[i];
3504                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3505                         << RXD_LEN_SHIFT;
3506                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3507                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3508                                (i << RXD_OPAQUE_INDEX_SHIFT));
3509         }
3510
3511         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3512                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3513                         struct tg3_rx_buffer_desc *rxd;
3514
3515                         rxd = &tp->rx_jumbo[i];
3516                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3517                                 << RXD_LEN_SHIFT;
3518                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3519                                 RXD_FLAG_JUMBO;
3520                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3521                                (i << RXD_OPAQUE_INDEX_SHIFT));
3522                 }
3523         }
3524
3525         /* Now allocate fresh SKBs for each rx ring. */
3526         for (i = 0; i < tp->rx_pending; i++) {
3527                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3528                                      -1, i) < 0)
3529                         break;
3530         }
3531
3532         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3533                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3534                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3535                                              -1, i) < 0)
3536                                 break;
3537                 }
3538         }
3539 }
3540
3541 /*
3542  * Must not be invoked with interrupt sources disabled and
3543  * the hardware shut down.
3544  */
3545 static void tg3_free_consistent(struct tg3 *tp)
3546 {
3547         if (tp->rx_std_buffers) {
3548                 kfree(tp->rx_std_buffers);
3549                 tp->rx_std_buffers = NULL;
3550         }
3551         if (tp->rx_std) {
3552                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3553                                     tp->rx_std, tp->rx_std_mapping);
3554                 tp->rx_std = NULL;
3555         }
3556         if (tp->rx_jumbo) {
3557                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3558                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3559                 tp->rx_jumbo = NULL;
3560         }
3561         if (tp->rx_rcb) {
3562                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3563                                     tp->rx_rcb, tp->rx_rcb_mapping);
3564                 tp->rx_rcb = NULL;
3565         }
3566         if (tp->tx_ring) {
3567                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3568                         tp->tx_ring, tp->tx_desc_mapping);
3569                 tp->tx_ring = NULL;
3570         }
3571         if (tp->hw_status) {
3572                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3573                                     tp->hw_status, tp->status_mapping);
3574                 tp->hw_status = NULL;
3575         }
3576         if (tp->hw_stats) {
3577                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3578                                     tp->hw_stats, tp->stats_mapping);
3579                 tp->hw_stats = NULL;
3580         }
3581 }
3582
3583 /*
3584  * Must not be invoked with interrupt sources disabled and
3585  * the hardware shut down.  Can sleep.
3586  */
3587 static int tg3_alloc_consistent(struct tg3 *tp)
3588 {
3589         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3590                                       (TG3_RX_RING_SIZE +
3591                                        TG3_RX_JUMBO_RING_SIZE)) +
3592                                      (sizeof(struct tx_ring_info) *
3593                                       TG3_TX_RING_SIZE),
3594                                      GFP_KERNEL);
3595         if (!tp->rx_std_buffers)
3596                 return -ENOMEM;
3597
3598         memset(tp->rx_std_buffers, 0,
3599                (sizeof(struct ring_info) *
3600                 (TG3_RX_RING_SIZE +
3601                  TG3_RX_JUMBO_RING_SIZE)) +
3602                (sizeof(struct tx_ring_info) *
3603                 TG3_TX_RING_SIZE));
3604
3605         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3606         tp->tx_buffers = (struct tx_ring_info *)
3607                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3608
3609         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3610                                           &tp->rx_std_mapping);
3611         if (!tp->rx_std)
3612                 goto err_out;
3613
3614         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3615                                             &tp->rx_jumbo_mapping);
3616
3617         if (!tp->rx_jumbo)
3618                 goto err_out;
3619
3620         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3621                                           &tp->rx_rcb_mapping);
3622         if (!tp->rx_rcb)
3623                 goto err_out;
3624
3625         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3626                                            &tp->tx_desc_mapping);
3627         if (!tp->tx_ring)
3628                 goto err_out;
3629
3630         tp->hw_status = pci_alloc_consistent(tp->pdev,
3631                                              TG3_HW_STATUS_SIZE,
3632                                              &tp->status_mapping);
3633         if (!tp->hw_status)
3634                 goto err_out;
3635
3636         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3637                                             sizeof(struct tg3_hw_stats),
3638                                             &tp->stats_mapping);
3639         if (!tp->hw_stats)
3640                 goto err_out;
3641
3642         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3643         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3644
3645         return 0;
3646
3647 err_out:
3648         tg3_free_consistent(tp);
3649         return -ENOMEM;
3650 }
3651
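/*
 * Editorial note: the single kmalloc() in tg3_alloc_consistent() above
 * is carved into three consecutive arrays (rx_std_buffers, then
 * rx_jumbo_buffers, then tx_buffers), which is why tg3_free_consistent()
 * only needs to kfree() tp->rx_std_buffers to release all three.
 */
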
3652 #define MAX_WAIT_CNT 1000
3653
3654 /* To stop a block, clear the enable bit and poll till it
3655  * clears.  tp->lock is held.
3656  */
3657 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3658 {
3659         unsigned int i;
3660         u32 val;
3661
3662         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3663                 switch (ofs) {
3664                 case RCVLSC_MODE:
3665                 case DMAC_MODE:
3666                 case MBFREE_MODE:
3667                 case BUFMGR_MODE:
3668                 case MEMARB_MODE:
3669                         /* We can't enable/disable these bits of the
3670                          * 5705/5750, just say success.
3671                          */
3672                         return 0;
3673
3674                 default:
3675                         break;
3676                 }
3677         }
3678
3679         val = tr32(ofs);
3680         val &= ~enable_bit;
3681         tw32_f(ofs, val);
3682
3683         for (i = 0; i < MAX_WAIT_CNT; i++) {
3684                 udelay(100);
3685                 val = tr32(ofs);
3686                 if ((val & enable_bit) == 0)
3687                         break;
3688         }
3689
3690         if (i == MAX_WAIT_CNT) {
3691                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3692                        "ofs=%lx enable_bit=%x\n",
3693                        ofs, enable_bit);
3694                 return -ENODEV;
3695         }
3696
3697         return 0;
3698 }
3699
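/*
 * Editorial note: tg3_stop_block() above polls in 100 usec steps up to
 * MAX_WAIT_CNT times, so a block that never clears its enable bit is
 * given roughly 100 ms before -ENODEV is returned.
 */
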
3700 /* tp->lock is held. */
3701 static int tg3_abort_hw(struct tg3 *tp)
3702 {
3703         int i, err;
3704
3705         tg3_disable_ints(tp);
3706
3707         tp->rx_mode &= ~RX_MODE_ENABLE;
3708         tw32_f(MAC_RX_MODE, tp->rx_mode);
3709         udelay(10);
3710
3711         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3712         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3713         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3714         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3715         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3716         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3717
3718         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3719         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3720         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3721         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3722         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3723         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3724         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3725         if (err)
3726                 goto out;
3727
3728         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3729         tw32_f(MAC_MODE, tp->mac_mode);
3730         udelay(40);
3731
3732         tp->tx_mode &= ~TX_MODE_ENABLE;
3733         tw32_f(MAC_TX_MODE, tp->tx_mode);
3734
3735         for (i = 0; i < MAX_WAIT_CNT; i++) {
3736                 udelay(100);
3737                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3738                         break;
3739         }
3740         if (i >= MAX_WAIT_CNT) {
3741                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3742                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3743                        tp->dev->name, tr32(MAC_TX_MODE));
3744                 return -ENODEV;
3745         }
3746
3747         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3748         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3749         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3750
3751         tw32(FTQ_RESET, 0xffffffff);
3752         tw32(FTQ_RESET, 0x00000000);
3753
3754         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3755         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3756         if (err)
3757                 goto out;
3758
3759         if (tp->hw_status)
3760                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3761         if (tp->hw_stats)
3762                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3763
3764 out:
3765         return err;
3766 }
3767
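/*
 * Editorial note: tg3_abort_hw() above quiesces the receive-side
 * blocks first, then the send path and read DMA, then host coalescing,
 * write DMA and the mailbox/buffer manager blocks, and finally the
 * memory arbiter, clearing the status and statistics blocks once the
 * chip is quiet.
 */
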
3768 /* tp->lock is held. */
3769 static int tg3_nvram_lock(struct tg3 *tp)
3770 {
3771         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3772                 int i;
3773
3774                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3775                 for (i = 0; i < 8000; i++) {
3776                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3777                                 break;
3778                         udelay(20);
3779                 }
3780                 if (i == 8000)
3781                         return -ENODEV;
3782         }
3783         return 0;
3784 }
3785
3786 /* tp->lock is held. */
3787 static void tg3_nvram_unlock(struct tg3 *tp)
3788 {
3789         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3790                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3791 }
3792
3793 /* tp->lock is held. */
3794 static void tg3_enable_nvram_access(struct tg3 *tp)
3795 {
3796         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3797             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3798                 u32 nvaccess = tr32(NVRAM_ACCESS);
3799
3800                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3801         }
3802 }
3803
3804 /* tp->lock is held. */
3805 static void tg3_disable_nvram_access(struct tg3 *tp)
3806 {
3807         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3808             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3809                 u32 nvaccess = tr32(NVRAM_ACCESS);
3810
3811                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3812         }
3813 }
3814
3815 /* tp->lock is held. */
3816 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3817 {
3818         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3819                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3820                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3821
3822         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3823                 switch (kind) {
3824                 case RESET_KIND_INIT:
3825                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3826                                       DRV_STATE_START);
3827                         break;
3828
3829                 case RESET_KIND_SHUTDOWN:
3830                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3831                                       DRV_STATE_UNLOAD);
3832                         break;
3833
3834                 case RESET_KIND_SUSPEND:
3835                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3836                                       DRV_STATE_SUSPEND);
3837                         break;
3838
3839                 default:
3840                         break;
3841                 }
3842         }
3843 }
3844
3845 /* tp->lock is held. */
3846 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3847 {
3848         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3849                 switch (kind) {
3850                 case RESET_KIND_INIT:
3851                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3852                                       DRV_STATE_START_DONE);
3853                         break;
3854
3855                 case RESET_KIND_SHUTDOWN:
3856                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3857                                       DRV_STATE_UNLOAD_DONE);
3858                         break;
3859
3860                 default:
3861                         break;
3862                 }
3863         }
3864 }
3865
3866 /* tp->lock is held. */
3867 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3868 {
3869         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3870                 switch (kind) {
3871                 case RESET_KIND_INIT:
3872                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3873                                       DRV_STATE_START);
3874                         break;
3875
3876                 case RESET_KIND_SHUTDOWN:
3877                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3878                                       DRV_STATE_UNLOAD);
3879                         break;
3880
3881                 case RESET_KIND_SUSPEND:
3882                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3883                                       DRV_STATE_SUSPEND);
3884                         break;
3885
3886                 default:
3887                         break;
3888                 }
3889         }
3890 }
3891
3892 static void tg3_stop_fw(struct tg3 *);
3893
3894 /* tp->lock is held. */
3895 static int tg3_chip_reset(struct tg3 *tp)
3896 {
3897         u32 val;
3898         u32 flags_save;
3899         int i;
3900
3901         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3902                 tg3_nvram_lock(tp);
3903
3904         /*
3905          * We must avoid the readl() that normally takes place.
3906          * It locks machines, causes machine checks, and does
3907          * other fun things.  So, temporarily disable the 5701
3908          * hardware workaround while we do the reset.
3909          */
3910         flags_save = tp->tg3_flags;
3911         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3912
3913         /* do the reset */
3914         val = GRC_MISC_CFG_CORECLK_RESET;
3915
3916         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3917                 if (tr32(0x7e2c) == 0x60) {
3918                         tw32(0x7e2c, 0x20);
3919                 }
3920                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3921                         tw32(GRC_MISC_CFG, (1 << 29));
3922                         val |= (1 << 29);
3923                 }
3924         }
3925
3926         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3927                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3928         tw32(GRC_MISC_CFG, val);
3929
3930         /* restore 5701 hardware bug workaround flag */
3931         tp->tg3_flags = flags_save;
3932
3933         /* Unfortunately, we have to delay before the PCI read back.
3934          * Some 575X chips will not even respond to a PCI cfg access
3935          * when the reset command is given to the chip.
3936          *
3937          * How do these hardware designers expect things to work
3938          * properly if the PCI write is posted for a long period
3939          * of time?  It is always necessary to have some method by
3940          * which a register read back can occur to push the write
3941          * out which does the reset.
3942          *
3943          * For most tg3 variants the trick below was working.
3944          * Ho hum...
3945          */
3946         udelay(120);
3947
3948         /* Flush PCI posted writes.  The normal MMIO registers
3949          * are inaccessible at this time, so this is the only
3950          * way to do this reliably (actually, this is no longer
3951          * the case, see above).  I tried to use indirect
3952          * register read/write but this upset some 5701 variants.
3953          */
3954         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3955
3956         udelay(120);
3957
3958         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3959                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3960                         int i;
3961                         u32 cfg_val;
3962
3963                         /* Wait for link training to complete.  */
3964                         for (i = 0; i < 5000; i++)
3965                                 udelay(100);
3966
3967                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3968                         pci_write_config_dword(tp->pdev, 0xc4,
3969                                                cfg_val | (1 << 15));
3970                 }
3971                 /* Set PCIE max payload size and clear error status.  */
3972                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3973         }
3974
3975         /* Re-enable indirect register accesses. */
3976         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3977                                tp->misc_host_ctrl);
3978
3979         /* Set MAX PCI retry to zero. */
3980         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3981         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3982             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3983                 val |= PCISTATE_RETRY_SAME_DMA;
3984         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3985
3986         pci_restore_state(tp->pdev);
3987
3988         /* Make sure PCI-X relaxed ordering bit is clear. */
3989         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3990         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3991         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3992
3993         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3994
3995         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3996                 tg3_stop_fw(tp);
3997                 tw32(0x5000, 0x400);
3998         }
3999
4000         tw32(GRC_MODE, tp->grc_mode);
4001
4002         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4003                 u32 val = tr32(0xc4);
4004
4005                 tw32(0xc4, val | (1 << 15));
4006         }
4007
4008         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4010                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4011                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4012                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4013                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4014         }
4015
4016         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4017                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4018                 tw32_f(MAC_MODE, tp->mac_mode);
4019         } else
4020                 tw32_f(MAC_MODE, 0);
4021         udelay(40);
4022
4023         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4024                 /* Wait for firmware initialization to complete. */
4025                 for (i = 0; i < 100000; i++) {
4026                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4027                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4028                                 break;
4029                         udelay(10);
4030                 }
4031                 if (i >= 100000) {
4032                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4033                                "firmware will not restart magic=%08x\n",
4034                                tp->dev->name, val);
4035                         return -ENODEV;
4036                 }
4037         }
4038
4039         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4040             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4041                 u32 val = tr32(0x7c00);
4042
4043                 tw32(0x7c00, val | (1 << 25));
4044         }
4045
4046         /* Reprobe ASF enable state.  */
4047         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4048         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4049         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4050         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4051                 u32 nic_cfg;
4052
4053                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4054                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4055                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4056                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4057                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4058                 }
4059         }
4060
4061         return 0;
4062 }
4063
4064 /* tp->lock is held. */
4065 static void tg3_stop_fw(struct tg3 *tp)
4066 {
4067         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4068                 u32 val;
4069                 int i;
4070
4071                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4072                 val = tr32(GRC_RX_CPU_EVENT);
4073                 val |= (1 << 14);
4074                 tw32(GRC_RX_CPU_EVENT, val);
4075
4076                 /* Wait for RX cpu to ACK the event.  */
4077                 for (i = 0; i < 100; i++) {
4078                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4079                                 break;
4080                         udelay(1);
4081                 }
4082         }
4083 }
4084
4085 /* tp->lock is held. */
4086 static int tg3_halt(struct tg3 *tp)
4087 {
4088         int err;
4089
4090         tg3_stop_fw(tp);
4091
4092         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4093
4094         tg3_abort_hw(tp);
4095         err = tg3_chip_reset(tp);
4096
4097         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4098         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4099
4100         if (err)
4101                 return err;
4102
4103         return 0;
4104 }
4105
4106 #define TG3_FW_RELEASE_MAJOR    0x0
4107 #define TG3_FW_RELASE_MINOR     0x0
4108 #define TG3_FW_RELEASE_FIX      0x0
4109 #define TG3_FW_START_ADDR       0x08000000
4110 #define TG3_FW_TEXT_ADDR        0x08000000
4111 #define TG3_FW_TEXT_LEN         0x9c0
4112 #define TG3_FW_RODATA_ADDR      0x080009c0
4113 #define TG3_FW_RODATA_LEN       0x60
4114 #define TG3_FW_DATA_ADDR        0x08000a40
4115 #define TG3_FW_DATA_LEN         0x20
4116 #define TG3_FW_SBSS_ADDR        0x08000a60
4117 #define TG3_FW_SBSS_LEN         0xc
4118 #define TG3_FW_BSS_ADDR         0x08000a70
4119 #define TG3_FW_BSS_LEN          0x10
4120
4121 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4122         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4123         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4124         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4125         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4126         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4127         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4128         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4129         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4130         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4131         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4132         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4133         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4134         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4135         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4136         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4137         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4138         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4139         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4140         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4141         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4142         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4143         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4144         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4145         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4146         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4147         0, 0, 0, 0, 0, 0,
4148         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4149         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4150         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4151         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4152         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4153         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4154         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4155         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4156         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4157         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4158         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4159         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4160         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4161         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4162         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4163         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4164         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4165         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4166         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4167         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4168         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4169         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4170         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4171         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4172         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4173         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4174         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4175         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4176         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4177         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4178         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4179         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4180         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4181         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4182         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4183         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4184         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4185         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4186         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4187         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4188         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4189         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4190         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4191         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4192         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4193         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4194         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4195         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4196         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4197         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4198         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4199         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4200         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4201         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4202         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4203         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4204         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4205         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4206         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4207         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4208         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4209         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4210         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4211         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4212         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4213 };
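/* End of tg3FwText, the MIPS text image for the 5701 A0 firmware fix.  It is
 * copied into both the RX and TX CPU scratch areas by
 * tg3_load_5701_a0_firmware_fix() below, which then starts only the RX CPU.
 */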
4214
4215 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4216         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4217         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4218         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4219         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4220         0x00000000
4221 };
4222
4223 #if 0 /* All zeros, don't eat up space with it. */
4224 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4225         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4226         0x00000000, 0x00000000, 0x00000000, 0x00000000
4227 };
4228 #endif
4229
4230 #define RX_CPU_SCRATCH_BASE     0x30000
4231 #define RX_CPU_SCRATCH_SIZE     0x04000
4232 #define TX_CPU_SCRATCH_BASE     0x34000
4233 #define TX_CPU_SCRATCH_SIZE     0x04000
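/* On-chip scratch memory windows for the two embedded MIPS CPUs, 0x4000
 * bytes each.  The firmware images in this file are copied into these
 * windows (or, for the 5705 TSO case, into part of the mbuf pool) before
 * the corresponding CPU is started.
 */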
4234
4235 /* tp->lock is held. */
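/* Halt one embedded CPU: repeatedly request CPU_MODE_HALT and poll for the
 * halt bit, giving up after 10000 attempts.  For the RX CPU a final forced
 * halt is issued and 10 us allowed for it to take effect.
 */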
4236 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4237 {
4238         int i;
4239
4240         if (offset == TX_CPU_BASE &&
4241             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4242                 BUG();
4243
4244         if (offset == RX_CPU_BASE) {
4245                 for (i = 0; i < 10000; i++) {
4246                         tw32(offset + CPU_STATE, 0xffffffff);
4247                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4248                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4249                                 break;
4250                 }
4251
4252                 tw32(offset + CPU_STATE, 0xffffffff);
4253                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4254                 udelay(10);
4255         } else {
4256                 for (i = 0; i < 10000; i++) {
4257                         tw32(offset + CPU_STATE, 0xffffffff);
4258                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4259                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4260                                 break;
4261                 }
4262         }
4263
4264         if (i >= 10000) {
4265                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4266                        "%s CPU\n",
4267                        tp->dev->name,
4268                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4269                 return -ENODEV;
4270         }
4271         return 0;
4272 }
4273
4274 struct fw_info {
4275         unsigned int text_base;
4276         unsigned int text_len;
4277         u32 *text_data;
4278         unsigned int rodata_base;
4279         unsigned int rodata_len;
4280         u32 *rodata_data;
4281         unsigned int data_base;
4282         unsigned int data_len;
4283         u32 *data_data;
4284 };
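/* A NULL section pointer in struct fw_info means that section is loaded as
 * all zeroes (see the copy loops in tg3_load_firmware_cpu() below).
 */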
4285
4286 /* tp->lock is held. */
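/* Copy one firmware image, described by @info, into the scratch window of
 * the CPU at @cpu_base.  The CPU is halted, the whole scratch area is
 * zero-filled, and each section is then written at an offset given by the
 * low 16 bits of its link-time address.  Indirect register writes are
 * forced through PCI config space for the duration of the load.
 */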
4287 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4288                                  int cpu_scratch_size, struct fw_info *info)
4289 {
4290         int err, i;
4291         u32 orig_tg3_flags = tp->tg3_flags;
4292         void (*write_op)(struct tg3 *, u32, u32);
4293
4294         if (cpu_base == TX_CPU_BASE &&
4295             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4296                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4297                        "TX CPU firmware on %s which is 5705 or later.\n",
4298                        tp->dev->name);
4299                 return -EINVAL;
4300         }
4301
4302         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4303                 write_op = tg3_write_mem;
4304         else
4305                 write_op = tg3_write_indirect_reg32;
4306
4307         /* Force use of PCI config space for indirect register
4308          * write calls.
4309          */
4310         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4311
4312         err = tg3_halt_cpu(tp, cpu_base);
4313         if (err)
4314                 goto out;
4315
4316         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4317                 write_op(tp, cpu_scratch_base + i, 0);
4318         tw32(cpu_base + CPU_STATE, 0xffffffff);
4319         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4320         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4321                 write_op(tp, (cpu_scratch_base +
4322                               (info->text_base & 0xffff) +
4323                               (i * sizeof(u32))),
4324                          (info->text_data ?
4325                           info->text_data[i] : 0));
4326         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4327                 write_op(tp, (cpu_scratch_base +
4328                               (info->rodata_base & 0xffff) +
4329                               (i * sizeof(u32))),
4330                          (info->rodata_data ?
4331                           info->rodata_data[i] : 0));
4332         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4333                 write_op(tp, (cpu_scratch_base +
4334                               (info->data_base & 0xffff) +
4335                               (i * sizeof(u32))),
4336                          (info->data_data ?
4337                           info->data_data[i] : 0));
4338
4339         err = 0;
4340
4341 out:
4342         tp->tg3_flags = orig_tg3_flags;
4343         return err;
4344 }
4345
4346 /* tp->lock is held. */
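/* Load the 5701 A0 workaround firmware (tg3Fw*) into both CPU scratch areas,
 * then start only the RX CPU and wait for its PC to land on
 * TG3_FW_TEXT_ADDR.
 */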
4347 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4348 {
4349         struct fw_info info;
4350         int err, i;
4351
4352         info.text_base = TG3_FW_TEXT_ADDR;
4353         info.text_len = TG3_FW_TEXT_LEN;
4354         info.text_data = &tg3FwText[0];
4355         info.rodata_base = TG3_FW_RODATA_ADDR;
4356         info.rodata_len = TG3_FW_RODATA_LEN;
4357         info.rodata_data = &tg3FwRodata[0];
4358         info.data_base = TG3_FW_DATA_ADDR;
4359         info.data_len = TG3_FW_DATA_LEN;
4360         info.data_data = NULL;
4361
4362         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4363                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4364                                     &info);
4365         if (err)
4366                 return err;
4367
4368         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4369                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4370                                     &info);
4371         if (err)
4372                 return err;
4373
4374         /* Now start up only the RX CPU. */
4375         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4376         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4377
4378         for (i = 0; i < 5; i++) {
4379                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4380                         break;
4381                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4382                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4383                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4384                 udelay(1000);
4385         }
4386         if (i >= 5) {
4387                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
4388                        "to set RX CPU PC: is %08x, should be %08x\n",
4389                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4390                        TG3_FW_TEXT_ADDR);
4391                 return -ENODEV;
4392         }
4393         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4394         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4395
4396         return 0;
4397 }
4398
4399 #if TG3_TSO_SUPPORT != 0
4400
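/* TSO firmware images.  The image immediately below runs on the TX CPU;
 * 5705-class chips instead use the smaller tg3Tso5Fw* image further down,
 * which tg3_load_tso_firmware() loads into the RX CPU.
 */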
4401 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4402 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4403 #define TG3_TSO_FW_RELEASE_FIX          0x0
4404 #define TG3_TSO_FW_START_ADDR           0x08000000
4405 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4406 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4407 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4408 #define TG3_TSO_FW_RODATA_LEN           0x60
4409 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4410 #define TG3_TSO_FW_DATA_LEN             0x30
4411 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4412 #define TG3_TSO_FW_SBSS_LEN             0x2c
4413 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4414 #define TG3_TSO_FW_BSS_LEN              0x894
4415
4416 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4417         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4418         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4419         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4420         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4421         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4422         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4423         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4424         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4425         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4426         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4427         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4428         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4429         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4430         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4431         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4432         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4433         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4434         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4435         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4436         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4437         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4438         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4439         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4440         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4441         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4442         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4443         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4444         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4445         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4446         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4447         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4448         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4449         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4450         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4451         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4452         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4453         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4454         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4455         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4456         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4457         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4458         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4459         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4460         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4461         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4462         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4463         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4464         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4465         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4466         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4467         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4468         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4469         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4470         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4471         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4472         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4473         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4474         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4475         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4476         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4477         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4478         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4479         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4480         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4481         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4482         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4483         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4484         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4485         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4486         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4487         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4488         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4489         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4490         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4491         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4492         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4493         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4494         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4495         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4496         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4497         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4498         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4499         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4500         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4501         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4502         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4503         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4504         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4505         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4506         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4507         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4508         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4509         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4510         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4511         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4512         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4513         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4514         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4515         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4516         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4517         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4518         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4519         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4520         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4521         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4522         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4523         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4524         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4525         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4526         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4527         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4528         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4529         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4530         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4531         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4532         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4533         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4534         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4535         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4536         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4537         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4538         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4539         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4540         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4541         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4542         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4543         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4544         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4545         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4546         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4547         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4548         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4549         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4550         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4551         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4552         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4553         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4554         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4555         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4556         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4557         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4558         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4559         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4560         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4561         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4562         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4563         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4564         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4565         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4566         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4567         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4568         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4569         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4570         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4571         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4572         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4573         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4574         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4575         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4576         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4577         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4578         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4579         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4580         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4581         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4582         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4583         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4584         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4585         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4586         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4587         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4588         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4589         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4590         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4591         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4592         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4593         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4594         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4595         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4596         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4597         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4598         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4599         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4600         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4601         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4602         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4603         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4604         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4605         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4606         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4607         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4608         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4609         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4610         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4611         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4612         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4613         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4614         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4615         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4616         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4617         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4618         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4619         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4620         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4621         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4622         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4623         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4624         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4625         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4626         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4627         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4628         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4629         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4630         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4631         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4632         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4633         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4634         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4635         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4636         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4637         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4638         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4639         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4640         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4641         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4642         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4643         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4644         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4645         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4646         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4647         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4648         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4649         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4650         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4651         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4652         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4653         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4654         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4655         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4656         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4657         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4658         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4659         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4660         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4661         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4662         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4663         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4664         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4665         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4666         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4667         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4668         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4669         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4670         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4671         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4672         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4673         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4674         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4675         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4676         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4677         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4678         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4679         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4680         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4681         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4682         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4683         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4684         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4685         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4686         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4687         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4688         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4689         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4690         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4691         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4692         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4693         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4694         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4695         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4696         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4697         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4698         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4699         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4700         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4701 };
4702
4703 static u32 tg3TsoFwRodata[(TG3_TSO_FW_RODATA_LEN / 4) + 1] = {
4704         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4705         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4706         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4707         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4708         0x00000000,
4709 };
4710
4711 static u32 tg3TsoFwData[(TG3_TSO_FW_DATA_LEN / 4) + 1] = {
4712         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4713         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4714         0x00000000,
4715 };
4716
4717 /* 5705 needs a special version of the TSO firmware.  */
4718 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4719 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4720 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4721 #define TG3_TSO5_FW_START_ADDR          0x00010000
4722 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4723 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4724 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4725 #define TG3_TSO5_FW_RODATA_LEN          0x50
4726 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4727 #define TG3_TSO5_FW_DATA_LEN            0x20
4728 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4729 #define TG3_TSO5_FW_SBSS_LEN            0x28
4730 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4731 #define TG3_TSO5_FW_BSS_LEN             0x88
4732
4733 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4734         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4735         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4736         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4737         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4738         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4739         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4740         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4741         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4742         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4743         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4744         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4745         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4746         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4747         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4748         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4749         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4750         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4751         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4752         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4753         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4754         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4755         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4756         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4757         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4758         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4759         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4760         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4761         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4762         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4763         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4764         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4765         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4766         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4767         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4768         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4769         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4770         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4771         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4772         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4773         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4774         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4775         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4776         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4777         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4778         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4779         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4780         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4781         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4782         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4783         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4784         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4785         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4786         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4787         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4788         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4789         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4790         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4791         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4792         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4793         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4794         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4795         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4796         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4797         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4798         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4799         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4800         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4801         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4802         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4803         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4804         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4805         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4806         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4807         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4808         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4809         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4810         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4811         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4812         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4813         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4814         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4815         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4816         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4817         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4818         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4819         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4820         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4821         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4822         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4823         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4824         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4825         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4826         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4827         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4828         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4829         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4830         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4831         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4832         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4833         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4834         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4835         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4836         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4837         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4838         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4839         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4840         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4841         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4842         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4843         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4844         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4845         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4846         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4847         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4848         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4849         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4850         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4851         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4852         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4853         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4854         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4855         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4856         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4857         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4858         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4859         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4860         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4861         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4862         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4863         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4864         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4865         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4866         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4867         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4868         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4869         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4870         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4871         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4872         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4873         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4874         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4875         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4876         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4877         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4878         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4879         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4880         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4881         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4882         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4883         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4884         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4885         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4886         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4887         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4888         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4889         0x00000000, 0x00000000, 0x00000000,
4890 };
4891
4892 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4893         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4894         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4895         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4896         0x00000000, 0x00000000, 0x00000000,
4897 };
4898
4899 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4900         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4901         0x00000000, 0x00000000, 0x00000000,
4902 };
4903
4904 /* tp->lock is held. */
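/* Pick and load the TSO firmware.  Chips with hardware TSO need no firmware
 * at all.  On the 5705 the TSO5 image goes to the RX CPU, using part of the
 * 5705 mbuf pool as scratch space; otherwise the standard image goes to the
 * TX CPU scratch area.  The CPU is then started at the image's text base.
 */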
4905 static int tg3_load_tso_firmware(struct tg3 *tp)
4906 {
4907         struct fw_info info;
4908         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4909         int err, i;
4910
4911         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4912                 return 0;
4913
4914         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4915                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4916                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4917                 info.text_data = &tg3Tso5FwText[0];
4918                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4919                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4920                 info.rodata_data = &tg3Tso5FwRodata[0];
4921                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4922                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4923                 info.data_data = &tg3Tso5FwData[0];
4924                 cpu_base = RX_CPU_BASE;
4925                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4926                 cpu_scratch_size = (info.text_len +
4927                                     info.rodata_len +
4928                                     info.data_len +
4929                                     TG3_TSO5_FW_SBSS_LEN +
4930                                     TG3_TSO5_FW_BSS_LEN);
4931         } else {
4932                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4933                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4934                 info.text_data = &tg3TsoFwText[0];
4935                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4936                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4937                 info.rodata_data = &tg3TsoFwRodata[0];
4938                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4939                 info.data_len = TG3_TSO_FW_DATA_LEN;
4940                 info.data_data = &tg3TsoFwData[0];
4941                 cpu_base = TX_CPU_BASE;
4942                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4943                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4944         }
4945
4946         err = tg3_load_firmware_cpu(tp, cpu_base,
4947                                     cpu_scratch_base, cpu_scratch_size,
4948                                     &info);
4949         if (err)
4950                 return err;
4951
4952         /* Now start up the CPU. */
4953         tw32(cpu_base + CPU_STATE, 0xffffffff);
4954         tw32_f(cpu_base + CPU_PC,    info.text_base);
4955
4956         for (i = 0; i < 5; i++) {
4957                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4958                         break;
4959                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4960                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4961                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4962                 udelay(1000);
4963         }
4964         if (i >= 5) {
4965                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
4966                        "to set CPU PC: is %08x, should be %08x\n",
4967                        tp->dev->name, tr32(cpu_base + CPU_PC),
4968                        info.text_base);
4969                 return -ENODEV;
4970         }
4971         tw32(cpu_base + CPU_STATE, 0xffffffff);
4972         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4973         return 0;
4974 }
4975
4976 #endif /* TG3_TSO_SUPPORT != 0 */
4977
4978 /* tp->lock is held. */
4979 static void __tg3_set_mac_addr(struct tg3 *tp)
4980 {
4981         u32 addr_high, addr_low;
4982         int i;
4983
4984         addr_high = ((tp->dev->dev_addr[0] << 8) |
4985                      tp->dev->dev_addr[1]);
4986         addr_low = ((tp->dev->dev_addr[2] << 24) |
4987                     (tp->dev->dev_addr[3] << 16) |
4988                     (tp->dev->dev_addr[4] <<  8) |
4989                     (tp->dev->dev_addr[5] <<  0));
4990         for (i = 0; i < 4; i++) {
4991                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4992                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4993         }
4994
4995         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4996             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4997                 for (i = 0; i < 12; i++) {
4998                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4999                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5000                 }
5001         }
5002
5003         addr_high = (tp->dev->dev_addr[0] +
5004                      tp->dev->dev_addr[1] +
5005                      tp->dev->dev_addr[2] +
5006                      tp->dev->dev_addr[3] +
5007                      tp->dev->dev_addr[4] +
5008                      tp->dev->dev_addr[5]) &
5009                 TX_BACKOFF_SEED_MASK;
5010         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5011 }
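/* Example of the packing above, for a hypothetical MAC 00:10:18:2a:3b:4c:
 *   addr_high = 0x00000010   (bytes 0-1)
 *   addr_low  = 0x182a3b4c   (bytes 2-5)
 * The pair is written to all four MAC_ADDR_n slots (and to the twelve
 * extended slots on 5703/5704), and the masked byte sum seeds the TX
 * backoff register.
 */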
5012
5013 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5014 {
5015         struct tg3 *tp = netdev_priv(dev);
5016         struct sockaddr *addr = p;
5017
5018         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5019
5020         spin_lock_irq(&tp->lock);
5021         __tg3_set_mac_addr(tp);
5022         spin_unlock_irq(&tp->lock);
5023
5024         return 0;
5025 }
5026
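/* Write a TG3_BDINFO block (host DMA address high/low, maxlen/flags, and
 * NIC-side ring address) into NIC SRAM at bdinfo_addr.
 */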
5027 /* tp->lock is held. */
5028 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5029                            dma_addr_t mapping, u32 maxlen_flags,
5030                            u32 nic_addr)
5031 {
5032         tg3_write_mem(tp,
5033                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5034                       ((u64) mapping >> 32));
5035         tg3_write_mem(tp,
5036                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5037                       ((u64) mapping & 0xffffffff));
5038         tg3_write_mem(tp,
5039                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5040                        maxlen_flags);
5041
5042         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5043                 tg3_write_mem(tp,
5044                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5045                               nic_addr);
5046 }
5047
5048 static void __tg3_set_rx_mode(struct net_device *);
5049
5050 /* tp->lock is held. */
5051 static int tg3_reset_hw(struct tg3 *tp)
5052 {
5053         u32 val, rdmac_mode;
5054         int i, err, limit;
5055
5056         tg3_disable_ints(tp);
5057
5058         tg3_stop_fw(tp);
5059
5060         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5061
5062         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5063                 err = tg3_abort_hw(tp);
5064                 if (err)
5065                         return err;
5066         }
5067
5068         err = tg3_chip_reset(tp);
5069         if (err)
5070                 return err;
5071
5072         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5073
5074         /* This works around an issue with Athlon chipsets on
5075          * B3 tigon3 silicon.  This bit has no effect on any
5076          * other revision.  But do not set this on PCI Express
5077          * chips.
5078          */
5079         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5080                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5081         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5082
5083         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5084             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5085                 val = tr32(TG3PCI_PCISTATE);
5086                 val |= PCISTATE_RETRY_SAME_DMA;
5087                 tw32(TG3PCI_PCISTATE, val);
5088         }
5089
5090         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5091                 /* Enable some hw fixes.  */
5092                 val = tr32(TG3PCI_MSI_DATA);
5093                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5094                 tw32(TG3PCI_MSI_DATA, val);
5095         }
5096
5097         /* Descriptor ring init may access the NIC SRAM area
5098          * to set up the TX descriptors, so we can only do
5099          * this after the hardware has been successfully
5100          * reset.
5101          */
5102         tg3_init_rings(tp);
5103
5104         /* This value is determined during the probe time DMA
5105          * engine test, tg3_test_dma.
5106          */
5107         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5108
5109         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5110                           GRC_MODE_4X_NIC_SEND_RINGS |
5111                           GRC_MODE_NO_TX_PHDR_CSUM |
5112                           GRC_MODE_NO_RX_PHDR_CSUM);
5113         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5114         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5115                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5116         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5117                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5118
5119         tw32(GRC_MODE,
5120              tp->grc_mode |
5121              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5122
5123         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
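        /* A prescaler value of 65 presumably divides the 66 MHz clock down
         * to ~1 MHz, i.e. roughly 1 usec timer ticks.
         */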
5124         val = tr32(GRC_MISC_CFG);
5125         val &= ~0xff;
5126         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5127         tw32(GRC_MISC_CFG, val);
5128
5129         /* Initialize MBUF/DESC pool. */
5130         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5131                 /* Do nothing.  */
5132         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5133                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5134                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5135                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5136                 else
5137                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5138                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5139                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5140         }
5141 #if TG3_TSO_SUPPORT != 0
5142         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5143                 int fw_len;
5144
5145                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5146                           TG3_TSO5_FW_RODATA_LEN +
5147                           TG3_TSO5_FW_DATA_LEN +
5148                           TG3_TSO5_FW_SBSS_LEN +
5149                           TG3_TSO5_FW_BSS_LEN);
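                /* Round the firmware footprint up to a 128-byte boundary
                 * before carving it out of the front of the mbuf pool.
                 */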
5150                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5151                 tw32(BUFMGR_MB_POOL_ADDR,
5152                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5153                 tw32(BUFMGR_MB_POOL_SIZE,
5154                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5155         }
5156 #endif
5157
5158         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5159                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5160                      tp->bufmgr_config.mbuf_read_dma_low_water);
5161                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5162                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5163                 tw32(BUFMGR_MB_HIGH_WATER,
5164                      tp->bufmgr_config.mbuf_high_water);
5165         } else {
5166                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5167                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5168                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5169                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5170                 tw32(BUFMGR_MB_HIGH_WATER,
5171                      tp->bufmgr_config.mbuf_high_water_jumbo);
5172         }
5173         tw32(BUFMGR_DMA_LOW_WATER,
5174              tp->bufmgr_config.dma_low_water);
5175         tw32(BUFMGR_DMA_HIGH_WATER,
5176              tp->bufmgr_config.dma_high_water);
5177
5178         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
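        /* Poll up to 20 ms (2000 x 10 usec) for the buffer manager to
         * report itself enabled.
         */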
5179         for (i = 0; i < 2000; i++) {
5180                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5181                         break;
5182                 udelay(10);
5183         }
5184         if (i >= 2000) {
5185                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5186                        tp->dev->name);
5187                 return -ENODEV;
5188         }
5189
5190         /* Setup replenish threshold. */
5191         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5192
5193         /* Initialize TG3_BDINFO's at:
5194          *  RCVDBDI_STD_BD:     standard eth size rx ring
5195          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5196          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5197          *
5198          * like so:
5199          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5200          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5201          *                              ring attribute flags
5202          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5203          *
5204          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5205          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5206          *
5207          * The size of each ring is fixed in the firmware, but the location is
5208          * configurable.
5209          */
5210         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5211              ((u64) tp->rx_std_mapping >> 32));
5212         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5213              ((u64) tp->rx_std_mapping & 0xffffffff));
5214         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5215              NIC_SRAM_RX_BUFFER_DESC);
5216
5217         /* Don't even try to program the JUMBO/MINI buffer descriptor
5218          * configs on 5705.
5219          */
5220         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5221                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5222                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5223         } else {
5224                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5225                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5226
5227                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5228                      BDINFO_FLAGS_DISABLED);
5229
5230                 /* Setup replenish threshold. */
5231                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5232
5233                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5234                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5235                              ((u64) tp->rx_jumbo_mapping >> 32));
5236                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5237                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5238                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5239                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5240                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5241                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5242                 } else {
5243                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5244                              BDINFO_FLAGS_DISABLED);
5245                 }
5246
5247         }
5248
5249         /* There is only one send ring on 5705/5750, no need to explicitly
5250          * disable the others.
5251          */
5252         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5253                 /* Clear out send RCB ring in SRAM. */
5254                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5255                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5256                                       BDINFO_FLAGS_DISABLED);
5257         }
5258
5259         tp->tx_prod = 0;
5260         tp->tx_cons = 0;
5261         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5262         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5263
5264         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5265                        tp->tx_desc_mapping,
5266                        (TG3_TX_RING_SIZE <<
5267                         BDINFO_FLAGS_MAXLEN_SHIFT),
5268                        NIC_SRAM_TX_BUFFER_DESC);
5269
5270         /* There is only one receive return ring on 5705/5750, no need
5271          * to explicitly disable the others.
5272          */
5273         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5274                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5275                      i += TG3_BDINFO_SIZE) {
5276                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5277                                       BDINFO_FLAGS_DISABLED);
5278                 }
5279         }
5280
5281         tp->rx_rcb_ptr = 0;
5282         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5283
5284         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5285                        tp->rx_rcb_mapping,
5286                        (TG3_RX_RCB_RING_SIZE(tp) <<
5287                         BDINFO_FLAGS_MAXLEN_SHIFT),
5288                        0);
5289
5290         tp->rx_std_ptr = tp->rx_pending;
5291         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5292                      tp->rx_std_ptr);
5293
5294         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5295                                                 tp->rx_jumbo_pending : 0;
5296         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5297                      tp->rx_jumbo_ptr);
5298
5299         /* Initialize MAC address and backoff seed. */
5300         __tg3_set_mac_addr(tp);
5301
5302         /* MTU + ethernet header + FCS + optional VLAN tag */
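        /* ETH_HLEN covers the 14-byte header; the extra 8 bytes allow for
         * the 4-byte FCS plus a 4-byte VLAN tag.
         */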
5303         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5304
5305         /* The slot time is changed by tg3_setup_phy if we
5306          * run at gigabit with half duplex.
5307          */
5308         tw32(MAC_TX_LENGTHS,
5309              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5310              (6 << TX_LENGTHS_IPG_SHIFT) |
5311              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5312
5313         /* Receive rules. */
5314         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5315         tw32(RCVLPC_CONFIG, 0x0181);
5316
5317         /* Calculate RDMAC_MODE setting early, we need it to determine
5318          * the RCVLPC_STATE_ENABLE mask.
5319          */
5320         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5321                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5322                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5323                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5324                       RDMAC_MODE_LNGREAD_ENAB);
5325         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5326                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5327
5328         /* If statement applies to 5705 and 5750 PCI devices only */
5329         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5330              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5331             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5332                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5333                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5334                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5335                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5336                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5337                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5338                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5339                 }
5340         }
5341
5342         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5343                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5344
5345 #if TG3_TSO_SUPPORT != 0
5346         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5347                 rdmac_mode |= (1 << 27);
5348 #endif
5349
5350         /* Receive/send statistics. */
5351         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5352             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5353                 val = tr32(RCVLPC_STATS_ENABLE);
5354                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5355                 tw32(RCVLPC_STATS_ENABLE, val);
5356         } else {
5357                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5358         }
5359         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5360         tw32(SNDDATAI_STATSENAB, 0xffffff);
5361         tw32(SNDDATAI_STATSCTRL,
5362              (SNDDATAI_SCTRL_ENABLE |
5363               SNDDATAI_SCTRL_FASTUPD));
5364
5365         /* Setup host coalescing engine. */
5366         tw32(HOSTCC_MODE, 0);
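        /* Wait up to 20 ms for the coalescing engine to report disabled
         * before reprogramming its parameters.
         */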
5367         for (i = 0; i < 2000; i++) {
5368                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5369                         break;
5370                 udelay(10);
5371         }
5372
5373         tw32(HOSTCC_RXCOL_TICKS, 0);
5374         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5375         tw32(HOSTCC_RXMAX_FRAMES, 1);
5376         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5377         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5378                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5379                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5380         }
5381         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5382         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5383
5384         /* set status block DMA address */
5385         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5386              ((u64) tp->status_mapping >> 32));
5387         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5388              ((u64) tp->status_mapping & 0xffffffff));
5389
5390         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5391                 /* Status/statistics block address.  See tg3_timer,
5392                  * the tg3_periodic_fetch_stats call there, and
5393                  * tg3_get_stats to see how this works for 5705/5750 chips.
5394                  */
5395                 tw32(HOSTCC_STAT_COAL_TICKS,
5396                      DEFAULT_STAT_COAL_TICKS);
5397                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5398                      ((u64) tp->stats_mapping >> 32));
5399                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5400                      ((u64) tp->stats_mapping & 0xffffffff));
5401                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5402                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5403         }
5404
5405         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5406
5407         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5408         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5409         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5410                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5411
5412         /* Clear statistics/status block in chip, and status block in ram. */
5413         for (i = NIC_SRAM_STATS_BLK;
5414              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5415              i += sizeof(u32)) {
5416                 tg3_write_mem(tp, i, 0);
5417                 udelay(40);
5418         }
5419         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5420
5421         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5422                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5423         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5424         udelay(40);
5425
5426         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5427          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5428          * register to preserve the GPIO settings for LOMs. The GPIOs,
5429          * whether used as inputs or outputs, are set by boot code after
5430          * reset.
5431          */
5432         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5433                 u32 gpio_mask;
5434
5435                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5436                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5437
5438                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5439                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5440                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5441
5442                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5443
5444                 /* GPIO1 must be driven high for eeprom write protect */
5445                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5446                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5447         }
5448         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5449         udelay(100);
5450
5451         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5452         tr32(MAILBOX_INTERRUPT_0);
5453
5454         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5455                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5456                 udelay(40);
5457         }
5458
5459         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5460                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5461                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5462                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5463                WDMAC_MODE_LNGREAD_ENAB);
5464
5465         /* If statement applies to 5705 and 5750 PCI devices only */
5466         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5467              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5468             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5469                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5470                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5471                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5472                         /* nothing */
5473                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5474                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5475                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5476                         val |= WDMAC_MODE_RX_ACCEL;
5477                 }
5478         }
5479
5480         tw32_f(WDMAC_MODE, val);
5481         udelay(40);
5482
5483         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5484                 val = tr32(TG3PCI_X_CAPS);
5485                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5486                         val &= ~PCIX_CAPS_BURST_MASK;
5487                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5488                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5489                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5490                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5491                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5492                                 val |= (tp->split_mode_max_reqs <<
5493                                         PCIX_CAPS_SPLIT_SHIFT);
5494                 }
5495                 tw32(TG3PCI_X_CAPS, val);
5496         }
5497
5498         tw32_f(RDMAC_MODE, rdmac_mode);
5499         udelay(40);
5500
5501         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5502         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5503                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5504         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5505         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5506         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5507         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5508         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5509 #if TG3_TSO_SUPPORT != 0
5510         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5511                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5512 #endif
5513         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5514         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5515
5516         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5517                 err = tg3_load_5701_a0_firmware_fix(tp);
5518                 if (err)
5519                         return err;
5520         }
5521
5522 #if TG3_TSO_SUPPORT != 0
5523         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5524                 err = tg3_load_tso_firmware(tp);
5525                 if (err)
5526                         return err;
5527         }
5528 #endif
5529
5530         tp->tx_mode = TX_MODE_ENABLE;
5531         tw32_f(MAC_TX_MODE, tp->tx_mode);
5532         udelay(100);
5533
5534         tp->rx_mode = RX_MODE_ENABLE;
5535         tw32_f(MAC_RX_MODE, tp->rx_mode);
5536         udelay(10);
5537
5538         if (tp->link_config.phy_is_low_power) {
5539                 tp->link_config.phy_is_low_power = 0;
5540                 tp->link_config.speed = tp->link_config.orig_speed;
5541                 tp->link_config.duplex = tp->link_config.orig_duplex;
5542                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5543         }
5544
5545         tp->mi_mode = MAC_MI_MODE_BASE;
5546         tw32_f(MAC_MI_MODE, tp->mi_mode);
5547         udelay(80);
5548
5549         tw32(MAC_LED_CTRL, tp->led_ctrl);
5550
5551         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5552         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5553                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5554                 udelay(10);
5555         }
5556         tw32_f(MAC_RX_MODE, tp->rx_mode);
5557         udelay(10);
5558
5559         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5560                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5561                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5562                         /* Set drive transmission level to 1.2V only if
5563                          * the signal pre-emphasis bit is not set.  */
5564                         val = tr32(MAC_SERDES_CFG);
5565                         val &= 0xfffff000;
5566                         val |= 0x880;
5567                         tw32(MAC_SERDES_CFG, val);
5568                 }
5569                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5570                         tw32(MAC_SERDES_CFG, 0x616000);
5571         }
5572
5573         /* Prevent chip from dropping frames when flow control
5574          * is enabled.
5575          */
5576         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5577
5578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5579             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5580                 /* Use hardware link auto-negotiation */
5581                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5582         }
5583
5584         err = tg3_setup_phy(tp, 1);
5585         if (err)
5586                 return err;
5587
5588         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5589                 u32 tmp;
5590
5591                 /* Clear CRC stats. */
5592                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5593                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5594                         tg3_readphy(tp, 0x14, &tmp);
5595                 }
5596         }
5597
5598         __tg3_set_rx_mode(tp->dev);
5599
5600         /* Initialize receive rules. */
5601         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5602         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5603         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5604         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5605
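        /* Disable the unused rule slots.  The chip has 16 slots (8 on
         * 5705-class parts); when ASF is enabled the firmware appears to
         * own the top four, so those are left alone and each case below
         * falls through to clear the next lower rule.
         */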
5606         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5607                 limit = 8;
5608         else
5609                 limit = 16;
5610         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5611                 limit -= 4;
5612         switch (limit) {
5613         case 16:
5614                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5615         case 15:
5616                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5617         case 14:
5618                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5619         case 13:
5620                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5621         case 12:
5622                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5623         case 11:
5624                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5625         case 10:
5626                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5627         case 9:
5628                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5629         case 8:
5630                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5631         case 7:
5632                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5633         case 6:
5634                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5635         case 5:
5636                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5637         case 4:
5638                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5639         case 3:
5640                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5641         case 2:
5642         case 1:
5643
5644         default:
5645                 break;
5646         }
5647
5648         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5649
5650         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5651                 tg3_enable_ints(tp);
5652
5653         return 0;
5654 }
5655
5656 /* Called at device open time to get the chip ready for
5657  * packet processing.  Invoked with tp->lock held.
5658  */
5659 static int tg3_init_hw(struct tg3 *tp)
5660 {
5661         int err;
5662
5663         /* Force the chip into D0. */
5664         err = tg3_set_power_state(tp, 0);
5665         if (err)
5666                 goto out;
5667
5668         tg3_switch_clocks(tp);
5669
5670         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5671
5672         err = tg3_reset_hw(tp);
5673
5674 out:
5675         return err;
5676 }
5677
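/* Accumulate a 32-bit hardware counter into a 64-bit software counter,
 * carrying into the high word when the low word wraps.
 */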
5678 #define TG3_STAT_ADD32(PSTAT, REG) \
5679 do {    u32 __val = tr32(REG); \
5680         (PSTAT)->low += __val; \
5681         if ((PSTAT)->low < __val) \
5682                 (PSTAT)->high += 1; \
5683 } while (0)
5684
5685 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5686 {
5687         struct tg3_hw_stats *sp = tp->hw_stats;
5688
5689         if (!netif_carrier_ok(tp->dev))
5690                 return;
5691
5692         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5693         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5694         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5695         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5696         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5697         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5698         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5699         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5700         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5701         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5702         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5703         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5704         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5705
5706         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5707         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5708         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5709         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5710         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5711         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5712         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5713         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5714         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5715         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5716         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5717         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5718         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5719         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5720 }
5721
5722 static void tg3_timer(unsigned long __opaque)
5723 {
5724         struct tg3 *tp = (struct tg3 *) __opaque;
5725         unsigned long flags;
5726
5727         spin_lock_irqsave(&tp->lock, flags);
5728         spin_lock(&tp->tx_lock);
5729
5730         /* All of this garbage is because, when using non-tagged
5731          * IRQ status, the mailbox/status_block protocol the chip
5732          * uses with the CPU is race prone.
5733          */
5734         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5735                 tw32(GRC_LOCAL_CTRL,
5736                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5737         } else {
5738                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5739                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5740         }
5741
5742         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5743                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5744                 spin_unlock(&tp->tx_lock);
5745                 spin_unlock_irqrestore(&tp->lock, flags);
5746                 schedule_work(&tp->reset_task);
5747                 return;
5748         }
5749
5750         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5751                 tg3_periodic_fetch_stats(tp);
5752
5753         /* This part only runs once per second. */
5754         if (!--tp->timer_counter) {
5755                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5756                         u32 mac_stat;
5757                         int phy_event;
5758
5759                         mac_stat = tr32(MAC_STATUS);
5760
5761                         phy_event = 0;
5762                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5763                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5764                                         phy_event = 1;
5765                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5766                                 phy_event = 1;
5767
5768                         if (phy_event)
5769                                 tg3_setup_phy(tp, 0);
5770                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5771                         u32 mac_stat = tr32(MAC_STATUS);
5772                         int need_setup = 0;
5773
5774                         if (netif_carrier_ok(tp->dev) &&
5775                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5776                                 need_setup = 1;
5777                         }
5778                         if (!netif_carrier_ok(tp->dev) &&
5779                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5780                                          MAC_STATUS_SIGNAL_DET))) {
5781                                 need_setup = 1;
5782                         }
5783                         if (need_setup) {
5784                                 tw32_f(MAC_MODE,
5785                                      (tp->mac_mode &
5786                                       ~MAC_MODE_PORT_MODE_MASK));
5787                                 udelay(40);
5788                                 tw32_f(MAC_MODE, tp->mac_mode);
5789                                 udelay(40);
5790                                 tg3_setup_phy(tp, 0);
5791                         }
5792                 }
5793
5794                 tp->timer_counter = tp->timer_multiplier;
5795         }
5796
5797         /* Heartbeat is only sent once every 120 seconds.  */
5798         if (!--tp->asf_counter) {
5799                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5800                         u32 val;
5801
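                        /* Post the driver-alive heartbeat to the firmware
                         * mailbox (command, length, data), then set the
                         * RX CPU event bit to notify the firmware.
                         */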
5802                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5803                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5804                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5805                         val = tr32(GRC_RX_CPU_EVENT);
5806                         val |= (1 << 14);
5807                         tw32(GRC_RX_CPU_EVENT, val);
5808                 }
5809                 tp->asf_counter = tp->asf_multiplier;
5810         }
5811
5812         spin_unlock(&tp->tx_lock);
5813         spin_unlock_irqrestore(&tp->lock, flags);
5814
5815         tp->timer.expires = jiffies + tp->timer_offset;
5816         add_timer(&tp->timer);
5817 }
5818
5819 static int tg3_test_interrupt(struct tg3 *tp)
5820 {
5821         struct net_device *dev = tp->dev;
5822         int err, i;
5823         u32 int_mbox = 0;
5824
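        /* Swap in a test ISR, force an interrupt with HOSTCC_MODE_NOW, and
         * poll the interrupt mailbox to confirm the interrupt actually
         * reached the host before restoring the normal handler.
         */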
5825         tg3_disable_ints(tp);
5826
5827         free_irq(tp->pdev->irq, dev);
5828
5829         err = request_irq(tp->pdev->irq, tg3_test_isr,
5830                           SA_SHIRQ, dev->name, dev);
5831         if (err)
5832                 return err;
5833
5834         tg3_enable_ints(tp);
5835
5836         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5837                HOSTCC_MODE_NOW);
5838
5839         for (i = 0; i < 5; i++) {
5840                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5841                 if (int_mbox != 0)
5842                         break;
5843                 msleep(10);
5844         }
5845
5846         tg3_disable_ints(tp);
5847
5848         free_irq(tp->pdev->irq, dev);
5849
5850         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5851                 err = request_irq(tp->pdev->irq, tg3_msi,
5852                                   0, dev->name, dev);
5853         else
5854                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5855                                   SA_SHIRQ, dev->name, dev);
5856
5857         if (err)
5858                 return err;
5859
5860         if (int_mbox != 0)
5861                 return 0;
5862
5863         return -EIO;
5864 }
5865
5866 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
5867  * INTx mode is successfully restored.
5868  */
5869 static int tg3_test_msi(struct tg3 *tp)
5870 {
5871         struct net_device *dev = tp->dev;
5872         int err;
5873         u16 pci_cmd;
5874
5875         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5876                 return 0;
5877
5878         /* Turn off SERR reporting in case MSI terminates with Master
5879          * Abort.
5880          */
5881         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5882         pci_write_config_word(tp->pdev, PCI_COMMAND,
5883                               pci_cmd & ~PCI_COMMAND_SERR);
5884
5885         err = tg3_test_interrupt(tp);
5886
5887         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
5888
5889         if (!err)
5890                 return 0;
5891
5892         /* other failures */
5893         if (err != -EIO)
5894                 return err;
5895
5896         /* MSI test failed, go back to INTx mode */
5897         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
5898                "switching to INTx mode. Please report this failure to "
5899                "the PCI maintainer and include system chipset information.\n",
5900                        tp->dev->name);
5901
5902         free_irq(tp->pdev->irq, dev);
5903         pci_disable_msi(tp->pdev);
5904
5905         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5906
5907         err = request_irq(tp->pdev->irq, tg3_interrupt,
5908                           SA_SHIRQ, dev->name, dev);
5909
5910         if (err)
5911                 return err;
5912
5913         /* Need to reset the chip because the MSI cycle may have terminated
5914          * with Master Abort.
5915          */
5916         spin_lock_irq(&tp->lock);
5917         spin_lock(&tp->tx_lock);
5918
5919         tg3_halt(tp);
5920         err = tg3_init_hw(tp);
5921
5922         spin_unlock(&tp->tx_lock);
5923         spin_unlock_irq(&tp->lock);
5924
5925         if (err)
5926                 free_irq(tp->pdev->irq, dev);
5927
5928         return err;
5929 }
5930
5931 static int tg3_open(struct net_device *dev)
5932 {
5933         struct tg3 *tp = netdev_priv(dev);
5934         int err;
5935
5936         spin_lock_irq(&tp->lock);
5937         spin_lock(&tp->tx_lock);
5938
5939         tg3_disable_ints(tp);
5940         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5941
5942         spin_unlock(&tp->tx_lock);
5943         spin_unlock_irq(&tp->lock);
5944
5945         /* The placement of this call is tied
5946          * to the setup and use of Host TX descriptors.
5947          */
5948         err = tg3_alloc_consistent(tp);
5949         if (err)
5950                 return err;
5951
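        /* Only try MSI on 5750-and-newer parts, and skip the 5750 AX/BX
         * revisions, which reportedly have MSI errata.
         */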
5952         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5953             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
5954             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
5955                 if (pci_enable_msi(tp->pdev) == 0) {
5956                         u32 msi_mode;
5957
5958                         msi_mode = tr32(MSGINT_MODE);
5959                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
5960                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
5961                 }
5962         }
5963         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5964                 err = request_irq(tp->pdev->irq, tg3_msi,
5965                                   0, dev->name, dev);
5966         else
5967                 err = request_irq(tp->pdev->irq, tg3_interrupt,
5968                                   SA_SHIRQ, dev->name, dev);
5969
5970         if (err) {
5971                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5972                         pci_disable_msi(tp->pdev);
5973                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5974                 }
5975                 tg3_free_consistent(tp);
5976                 return err;
5977         }
5978
5979         spin_lock_irq(&tp->lock);
5980         spin_lock(&tp->tx_lock);
5981
5982         err = tg3_init_hw(tp);
5983         if (err) {
5984                 tg3_halt(tp);
5985                 tg3_free_rings(tp);
5986         } else {
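                /* The timer fires every HZ/10 jiffies (ten times a second);
                 * timer_counter paces the once-per-second link checks and
                 * asf_counter paces the 120-second firmware heartbeat.
                 */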
5987                 tp->timer_offset = HZ / 10;
5988                 tp->timer_counter = tp->timer_multiplier = 10;
5989                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5990
5991                 init_timer(&tp->timer);
5992                 tp->timer.expires = jiffies + tp->timer_offset;
5993                 tp->timer.data = (unsigned long) tp;
5994                 tp->timer.function = tg3_timer;
5995         }
5996
5997         spin_unlock(&tp->tx_lock);
5998         spin_unlock_irq(&tp->lock);
5999
6000         if (err) {
6001                 free_irq(tp->pdev->irq, dev);
6002                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6003                         pci_disable_msi(tp->pdev);
6004                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6005                 }
6006                 tg3_free_consistent(tp);
6007                 return err;
6008         }
6009
6010         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6011                 err = tg3_test_msi(tp);
6012                 if (err) {
6013                         spin_lock_irq(&tp->lock);
6014                         spin_lock(&tp->tx_lock);
6015
6016                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6017                                 pci_disable_msi(tp->pdev);
6018                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6019                         }
6020                         tg3_halt(tp);
6021                         tg3_free_rings(tp);
6022                         tg3_free_consistent(tp);
6023
6024                         spin_unlock(&tp->tx_lock);
6025                         spin_unlock_irq(&tp->lock);
6026
6027                         return err;
6028                 }
6029         }
6030
6031         spin_lock_irq(&tp->lock);
6032         spin_lock(&tp->tx_lock);
6033
6034         add_timer(&tp->timer);
6035         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6036         tg3_enable_ints(tp);
6037
6038         spin_unlock(&tp->tx_lock);
6039         spin_unlock_irq(&tp->lock);
6040
6041         netif_start_queue(dev);
6042
6043         return 0;
6044 }
6045
6046 #if 0
6047 /*static*/ void tg3_dump_state(struct tg3 *tp)
6048 {
6049         u32 val32, val32_2, val32_3, val32_4, val32_5;
6050         u16 val16;
6051         int i;
6052
6053         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6054         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6055         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6056                val16, val32);
6057
6058         /* MAC block */
6059         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6060                tr32(MAC_MODE), tr32(MAC_STATUS));
6061         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6062                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6063         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6064                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6065         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6066                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6067
6068         /* Send data initiator control block */
6069         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6070                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6071         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6072                tr32(SNDDATAI_STATSCTRL));
6073
6074         /* Send data completion control block */
6075         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6076
6077         /* Send BD ring selector block */
6078         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6079                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6080
6081         /* Send BD initiator control block */
6082         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6083                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6084
6085         /* Send BD completion control block */
6086         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6087
6088         /* Receive list placement control block */
6089         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6090                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6091         printk("       RCVLPC_STATSCTRL[%08x]\n",
6092                tr32(RCVLPC_STATSCTRL));
6093
6094         /* Receive data and receive BD initiator control block */
6095         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6096                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6097
6098         /* Receive data completion control block */
6099         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6100                tr32(RCVDCC_MODE));
6101
6102         /* Receive BD initiator control block */
6103         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6104                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6105
6106         /* Receive BD completion control block */
6107         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6108                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6109
6110         /* Receive list selector control block */
6111         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6112                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6113
6114         /* Mbuf cluster free block */
6115         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6116                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6117
6118         /* Host coalescing control block */
6119         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6120                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6121         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6122                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6123                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6124         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6125                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6126                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6127         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6128                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6129         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6130                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6131
6132         /* Memory arbiter control block */
6133         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6134                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6135
6136         /* Buffer manager control block */
6137         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6138                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6139         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6140                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6141         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6142                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6143                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6144                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6145
6146         /* Read DMA control block */
6147         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6148                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6149
6150         /* Write DMA control block */
6151         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6152                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6153
6154         /* DMA completion block */
6155         printk("DEBUG: DMAC_MODE[%08x]\n",
6156                tr32(DMAC_MODE));
6157
6158         /* GRC block */
6159         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6160                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6161         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6162                tr32(GRC_LOCAL_CTRL));
6163
6164         /* TG3_BDINFOs */
6165         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6166                tr32(RCVDBDI_JUMBO_BD + 0x0),
6167                tr32(RCVDBDI_JUMBO_BD + 0x4),
6168                tr32(RCVDBDI_JUMBO_BD + 0x8),
6169                tr32(RCVDBDI_JUMBO_BD + 0xc));
6170         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6171                tr32(RCVDBDI_STD_BD + 0x0),
6172                tr32(RCVDBDI_STD_BD + 0x4),
6173                tr32(RCVDBDI_STD_BD + 0x8),
6174                tr32(RCVDBDI_STD_BD + 0xc));
6175         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6176                tr32(RCVDBDI_MINI_BD + 0x0),
6177                tr32(RCVDBDI_MINI_BD + 0x4),
6178                tr32(RCVDBDI_MINI_BD + 0x8),
6179                tr32(RCVDBDI_MINI_BD + 0xc));
6180
6181         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6182         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6183         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6184         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6185         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6186                val32, val32_2, val32_3, val32_4);
6187
6188         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6189         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6190         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6191         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6192         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6193                val32, val32_2, val32_3, val32_4);
6194
6195         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6196         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6197         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6198         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6199         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6200         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6201                val32, val32_2, val32_3, val32_4, val32_5);
6202
6203         /* SW status block */
6204         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6205                tp->hw_status->status,
6206                tp->hw_status->status_tag,
6207                tp->hw_status->rx_jumbo_consumer,
6208                tp->hw_status->rx_consumer,
6209                tp->hw_status->rx_mini_consumer,
6210                tp->hw_status->idx[0].rx_producer,
6211                tp->hw_status->idx[0].tx_consumer);
6212
6213         /* SW statistics block */
6214         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6215                ((u32 *)tp->hw_stats)[0],
6216                ((u32 *)tp->hw_stats)[1],
6217                ((u32 *)tp->hw_stats)[2],
6218                ((u32 *)tp->hw_stats)[3]);
6219
6220         /* Mailboxes */
6221         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6222                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6223                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6224                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6225                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6226
6227         /* NIC side send descriptors. */
6228         for (i = 0; i < 6; i++) {
6229                 unsigned long txd;
6230
6231                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6232                         + (i * sizeof(struct tg3_tx_buffer_desc));
6233                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6234                        i,
6235                        readl(txd + 0x0), readl(txd + 0x4),
6236                        readl(txd + 0x8), readl(txd + 0xc));
6237         }
6238
6239         /* NIC side RX descriptors. */
6240         for (i = 0; i < 6; i++) {
6241                 unsigned long rxd;
6242
6243                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6244                         + (i * sizeof(struct tg3_rx_buffer_desc));
6245                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6246                        i,
6247                        readl(rxd + 0x0), readl(rxd + 0x4),
6248                        readl(rxd + 0x8), readl(rxd + 0xc));
6249                 rxd += (4 * sizeof(u32));
6250                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6251                        i,
6252                        readl(rxd + 0x0), readl(rxd + 0x4),
6253                        readl(rxd + 0x8), readl(rxd + 0xc));
6254         }
6255
6256         for (i = 0; i < 6; i++) {
6257                 unsigned long rxd;
6258
6259                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6260                         + (i * sizeof(struct tg3_rx_buffer_desc));
6261                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6262                        i,
6263                        readl(rxd + 0x0), readl(rxd + 0x4),
6264                        readl(rxd + 0x8), readl(rxd + 0xc));
6265                 rxd += (4 * sizeof(u32));
6266                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6267                        i,
6268                        readl(rxd + 0x0), readl(rxd + 0x4),
6269                        readl(rxd + 0x8), readl(rxd + 0xc));
6270         }
6271 }
6272 #endif
6273
6274 static struct net_device_stats *tg3_get_stats(struct net_device *);
6275 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6276
6277 static int tg3_close(struct net_device *dev)
6278 {
6279         struct tg3 *tp = netdev_priv(dev);
6280
6281         netif_stop_queue(dev);
6282
6283         del_timer_sync(&tp->timer);
6284
6285         spin_lock_irq(&tp->lock);
6286         spin_lock(&tp->tx_lock);
6287 #if 0
6288         tg3_dump_state(tp);
6289 #endif
6290
6291         tg3_disable_ints(tp);
6292
6293         tg3_halt(tp);
6294         tg3_free_rings(tp);
6295         tp->tg3_flags &=
6296                 ~(TG3_FLAG_INIT_COMPLETE |
6297                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6298         netif_carrier_off(tp->dev);
6299
6300         spin_unlock(&tp->tx_lock);
6301         spin_unlock_irq(&tp->lock);
6302
6303         free_irq(tp->pdev->irq, dev);
6304         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6305                 pci_disable_msi(tp->pdev);
6306                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6307         }
6308
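        /* Snapshot the final counters so later stats queries keep working
         * after the hardware stats block is freed below.
         */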
6309         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6310                sizeof(tp->net_stats_prev));
6311         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6312                sizeof(tp->estats_prev));
6313
6314         tg3_free_consistent(tp);
6315
6316         return 0;
6317 }
6318
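/* Fold a 64-bit hardware counter into an unsigned long; 32-bit hosts only
 * see the low 32 bits.
 */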
6319 static inline unsigned long get_stat64(tg3_stat64_t *val)
6320 {
6321         unsigned long ret;
6322
6323 #if (BITS_PER_LONG == 32)
6324         ret = val->low;
6325 #else
6326         ret = ((u64)val->high << 32) | ((u64)val->low);
6327 #endif
6328         return ret;
6329 }
6330
6331 static unsigned long calc_crc_errors(struct tg3 *tp)
6332 {
6333         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6334
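        /* On 5700/5701 with a copper PHY the CRC error count is kept in the
         * PHY (register 0x14, apparently latched by setting bit 15 of
         * register 0x1e) rather than in the MAC statistics block.
         */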
6335         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6336             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6337              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6338                 unsigned long flags;
6339                 u32 val;
6340
6341                 spin_lock_irqsave(&tp->lock, flags);
6342                 if (!tg3_readphy(tp, 0x1e, &val)) {
6343                         tg3_writephy(tp, 0x1e, val | 0x8000);
6344                         tg3_readphy(tp, 0x14, &val);
6345                 } else
6346                         val = 0;
6347                 spin_unlock_irqrestore(&tp->lock, flags);
6348
6349                 tp->phy_crc_errors += val;
6350
6351                 return tp->phy_crc_errors;
6352         }
6353
6354         return get_stat64(&hw_stats->rx_fcs_errors);
6355 }
6356
6357 #define ESTAT_ADD(member) \
6358         estats->member =        old_estats->member + \
6359                                 get_stat64(&hw_stats->member)
6360
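/* ESTAT_ADD(rx_octets), for example, expands to
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the snapshot saved in estats_prev at
 * the last close plus the live hardware counter, which keeps the
 * reported values monotonic across chip resets.
 */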
6361 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6362 {
6363         struct tg3_ethtool_stats *estats = &tp->estats;
6364         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6365         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6366
6367         if (!hw_stats)
6368                 return old_estats;
6369
6370         ESTAT_ADD(rx_octets);
6371         ESTAT_ADD(rx_fragments);
6372         ESTAT_ADD(rx_ucast_packets);
6373         ESTAT_ADD(rx_mcast_packets);
6374         ESTAT_ADD(rx_bcast_packets);
6375         ESTAT_ADD(rx_fcs_errors);
6376         ESTAT_ADD(rx_align_errors);
6377         ESTAT_ADD(rx_xon_pause_rcvd);
6378         ESTAT_ADD(rx_xoff_pause_rcvd);
6379         ESTAT_ADD(rx_mac_ctrl_rcvd);
6380         ESTAT_ADD(rx_xoff_entered);
6381         ESTAT_ADD(rx_frame_too_long_errors);
6382         ESTAT_ADD(rx_jabbers);
6383         ESTAT_ADD(rx_undersize_packets);
6384         ESTAT_ADD(rx_in_length_errors);
6385         ESTAT_ADD(rx_out_length_errors);
6386         ESTAT_ADD(rx_64_or_less_octet_packets);
6387         ESTAT_ADD(rx_65_to_127_octet_packets);
6388         ESTAT_ADD(rx_128_to_255_octet_packets);
6389         ESTAT_ADD(rx_256_to_511_octet_packets);
6390         ESTAT_ADD(rx_512_to_1023_octet_packets);
6391         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6392         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6393         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6394         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6395         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6396
6397         ESTAT_ADD(tx_octets);
6398         ESTAT_ADD(tx_collisions);
6399         ESTAT_ADD(tx_xon_sent);
6400         ESTAT_ADD(tx_xoff_sent);
6401         ESTAT_ADD(tx_flow_control);
6402         ESTAT_ADD(tx_mac_errors);
6403         ESTAT_ADD(tx_single_collisions);
6404         ESTAT_ADD(tx_mult_collisions);
6405         ESTAT_ADD(tx_deferred);
6406         ESTAT_ADD(tx_excessive_collisions);
6407         ESTAT_ADD(tx_late_collisions);
6408         ESTAT_ADD(tx_collide_2times);
6409         ESTAT_ADD(tx_collide_3times);
6410         ESTAT_ADD(tx_collide_4times);
6411         ESTAT_ADD(tx_collide_5times);
6412         ESTAT_ADD(tx_collide_6times);
6413         ESTAT_ADD(tx_collide_7times);
6414         ESTAT_ADD(tx_collide_8times);
6415         ESTAT_ADD(tx_collide_9times);
6416         ESTAT_ADD(tx_collide_10times);
6417         ESTAT_ADD(tx_collide_11times);
6418         ESTAT_ADD(tx_collide_12times);
6419         ESTAT_ADD(tx_collide_13times);
6420         ESTAT_ADD(tx_collide_14times);
6421         ESTAT_ADD(tx_collide_15times);
6422         ESTAT_ADD(tx_ucast_packets);
6423         ESTAT_ADD(tx_mcast_packets);
6424         ESTAT_ADD(tx_bcast_packets);
6425         ESTAT_ADD(tx_carrier_sense_errors);
6426         ESTAT_ADD(tx_discards);
6427         ESTAT_ADD(tx_errors);
6428
6429         ESTAT_ADD(dma_writeq_full);
6430         ESTAT_ADD(dma_write_prioq_full);
6431         ESTAT_ADD(rxbds_empty);
6432         ESTAT_ADD(rx_discards);
6433         ESTAT_ADD(rx_errors);
6434         ESTAT_ADD(rx_threshold_hit);
6435
6436         ESTAT_ADD(dma_readq_full);
6437         ESTAT_ADD(dma_read_prioq_full);
6438         ESTAT_ADD(tx_comp_queue_full);
6439
6440         ESTAT_ADD(ring_set_send_prod_index);
6441         ESTAT_ADD(ring_status_update);
6442         ESTAT_ADD(nic_irqs);
6443         ESTAT_ADD(nic_avoided_irqs);
6444         ESTAT_ADD(nic_tx_threshold_hit);
6445
6446         return estats;
6447 }
6448
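/* Map the hardware statistics block onto struct net_device_stats
 * using the same snapshot-plus-live scheme as tg3_get_estats():
 * rx_packets/tx_packets are the sums of the unicast, multicast and
 * broadcast counters, and rx_crc_errors goes through
 * calc_crc_errors(), which may issue MDIO reads under tp->lock.
 */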
6449 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6450 {
6451         struct tg3 *tp = netdev_priv(dev);
6452         struct net_device_stats *stats = &tp->net_stats;
6453         struct net_device_stats *old_stats = &tp->net_stats_prev;
6454         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6455
6456         if (!hw_stats)
6457                 return old_stats;
6458
6459         stats->rx_packets = old_stats->rx_packets +
6460                 get_stat64(&hw_stats->rx_ucast_packets) +
6461                 get_stat64(&hw_stats->rx_mcast_packets) +
6462                 get_stat64(&hw_stats->rx_bcast_packets);
6463                 
6464         stats->tx_packets = old_stats->tx_packets +
6465                 get_stat64(&hw_stats->tx_ucast_packets) +
6466                 get_stat64(&hw_stats->tx_mcast_packets) +
6467                 get_stat64(&hw_stats->tx_bcast_packets);
6468
6469         stats->rx_bytes = old_stats->rx_bytes +
6470                 get_stat64(&hw_stats->rx_octets);
6471         stats->tx_bytes = old_stats->tx_bytes +
6472                 get_stat64(&hw_stats->tx_octets);
6473
6474         stats->rx_errors = old_stats->rx_errors +
6475                 get_stat64(&hw_stats->rx_errors) +
6476                 get_stat64(&hw_stats->rx_discards);
6477         stats->tx_errors = old_stats->tx_errors +
6478                 get_stat64(&hw_stats->tx_errors) +
6479                 get_stat64(&hw_stats->tx_mac_errors) +
6480                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6481                 get_stat64(&hw_stats->tx_discards);
6482
6483         stats->multicast = old_stats->multicast +
6484                 get_stat64(&hw_stats->rx_mcast_packets);
6485         stats->collisions = old_stats->collisions +
6486                 get_stat64(&hw_stats->tx_collisions);
6487
6488         stats->rx_length_errors = old_stats->rx_length_errors +
6489                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6490                 get_stat64(&hw_stats->rx_undersize_packets);
6491
6492         stats->rx_over_errors = old_stats->rx_over_errors +
6493                 get_stat64(&hw_stats->rxbds_empty);
6494         stats->rx_frame_errors = old_stats->rx_frame_errors +
6495                 get_stat64(&hw_stats->rx_align_errors);
6496         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6497                 get_stat64(&hw_stats->tx_discards);
6498         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6499                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6500
6501         stats->rx_crc_errors = old_stats->rx_crc_errors +
6502                 calc_crc_errors(tp);
6503
6504         return stats;
6505 }
6506
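/* Bit-serial, reflected CRC-32 over 'buf' (polynomial 0xedb88320,
 * initial value 0xffffffff, final inversion) -- the same CRC used for
 * the 802.3 FCS.  It is only used below to hash multicast addresses,
 * so the byte-at-a-time loop is not performance critical.
 */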
6507 static inline u32 calc_crc(unsigned char *buf, int len)
6508 {
6509         u32 reg;
6510         u32 tmp;
6511         int j, k;
6512
6513         reg = 0xffffffff;
6514
6515         for (j = 0; j < len; j++) {
6516                 reg ^= buf[j];
6517
6518                 for (k = 0; k < 8; k++) {
6519                         tmp = reg & 0x01;
6520
6521                         reg >>= 1;
6522
6523                         if (tmp) {
6524                                 reg ^= 0xedb88320;
6525                         }
6526                 }
6527         }
6528
6529         return ~reg;
6530 }
6531
6532 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6533 {
6534         /* accept or reject all multicast frames */
6535         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6536         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6537         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6538         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6539 }
6540
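/* Compute and load the RX MAC filtering state (promiscuous mode, VLAN
 * tag stripping, multicast hash).  Each multicast address is hashed
 * into one of 128 bits spread across MAC_HASH_REG_0..3: the low 7 bits
 * of the complemented CRC select the bit, with bits 6:5 picking the
 * register and bits 4:0 the bit within it.  For a hypothetical value
 * of (~crc & 0x7f) == 0x5b, regidx = (0x5b & 0x60) >> 5 = 2 and
 * bit = 0x5b & 0x1f = 27, so bit 27 of MAC_HASH_REG_2 gets set.
 */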
6541 static void __tg3_set_rx_mode(struct net_device *dev)
6542 {
6543         struct tg3 *tp = netdev_priv(dev);
6544         u32 rx_mode;
6545
6546         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6547                                   RX_MODE_KEEP_VLAN_TAG);
6548
6549         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6550          * flag clear.
6551          */
6552 #if TG3_VLAN_TAG_USED
6553         if (!tp->vlgrp &&
6554             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6555                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6556 #else
6557         /* By definition, VLAN is always disabled in this
6558          * case.
6559          */
6560         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6561                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6562 #endif
6563
6564         if (dev->flags & IFF_PROMISC) {
6565                 /* Promiscuous mode. */
6566                 rx_mode |= RX_MODE_PROMISC;
6567         } else if (dev->flags & IFF_ALLMULTI) {
6568                 /* Accept all multicast. */
6569                 tg3_set_multi (tp, 1);
6570         } else if (dev->mc_count < 1) {
6571                 /* Reject all multicast. */
6572                 tg3_set_multi (tp, 0);
6573         } else {
6574                 /* Accept one or more multicast(s). */
6575                 struct dev_mc_list *mclist;
6576                 unsigned int i;
6577                 u32 mc_filter[4] = { 0, };
6578                 u32 regidx;
6579                 u32 bit;
6580                 u32 crc;
6581
6582                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6583                      i++, mclist = mclist->next) {
6584
6585                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6586                         bit = ~crc & 0x7f;
6587                         regidx = (bit & 0x60) >> 5;
6588                         bit &= 0x1f;
6589                         mc_filter[regidx] |= (1 << bit);
6590                 }
6591
6592                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6593                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6594                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6595                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6596         }
6597
6598         if (rx_mode != tp->rx_mode) {
6599                 tp->rx_mode = rx_mode;
6600                 tw32_f(MAC_RX_MODE, rx_mode);
6601                 udelay(10);
6602         }
6603 }
6604
6605 static void tg3_set_rx_mode(struct net_device *dev)
6606 {
6607         struct tg3 *tp = netdev_priv(dev);
6608
6609         spin_lock_irq(&tp->lock);
6610         spin_lock(&tp->tx_lock);
6611         __tg3_set_rx_mode(dev);
6612         spin_unlock(&tp->tx_lock);
6613         spin_unlock_irq(&tp->lock);
6614 }
6615
6616 #define TG3_REGDUMP_LEN         (32 * 1024)
6617
6618 static int tg3_get_regs_len(struct net_device *dev)
6619 {
6620         return TG3_REGDUMP_LEN;
6621 }
6622
6623 static void tg3_get_regs(struct net_device *dev,
6624                 struct ethtool_regs *regs, void *_p)
6625 {
6626         u32 *p = _p;
6627         struct tg3 *tp = netdev_priv(dev);
6628         u8 *orig_p = _p;
6629         int i;
6630
6631         regs->version = 0;
6632
6633         memset(p, 0, TG3_REGDUMP_LEN);
6634
6635         spin_lock_irq(&tp->lock);
6636         spin_lock(&tp->tx_lock);
6637
6638 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6639 #define GET_REG32_LOOP(base,len)                \
6640 do {    p = (u32 *)(orig_p + (base));           \
6641         for (i = 0; i < len; i += 4)            \
6642                 __GET_REG32((base) + i);        \
6643 } while (0)
6644 #define GET_REG32_1(reg)                        \
6645 do {    p = (u32 *)(orig_p + (reg));            \
6646         __GET_REG32((reg));                     \
6647 } while (0)
6648
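        /* Each GET_REG32_LOOP(base, len) copies 'len' bytes of register
         * space starting at 'base' into the dump buffer at that same
         * offset, so the 32 kB blob mirrors the register layout and any
         * skipped regions stay zero from the memset() above.
         */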
6649         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6650         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6651         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6652         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6653         GET_REG32_1(SNDDATAC_MODE);
6654         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6655         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6656         GET_REG32_1(SNDBDC_MODE);
6657         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6658         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6659         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6660         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6661         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6662         GET_REG32_1(RCVDCC_MODE);
6663         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6664         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6665         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6666         GET_REG32_1(MBFREE_MODE);
6667         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6668         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6669         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6670         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6671         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6672         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6673         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6674         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6675         GET_REG32_LOOP(FTQ_RESET, 0x120);
6676         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6677         GET_REG32_1(DMAC_MODE);
6678         GET_REG32_LOOP(GRC_MODE, 0x4c);
6679         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6680                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6681
6682 #undef __GET_REG32
6683 #undef GET_REG32_LOOP
6684 #undef GET_REG32_1
6685
6686         spin_unlock(&tp->tx_lock);
6687         spin_unlock_irq(&tp->lock);
6688 }
6689
6690 static int tg3_get_eeprom_len(struct net_device *dev)
6691 {
6692         struct tg3 *tp = netdev_priv(dev);
6693
6694         return tp->nvram_size;
6695 }
6696
6697 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6698
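/* ethtool EEPROM read.  NVRAM is only readable a 32-bit word at a
 * time, so an arbitrary (offset, len) request is split into a leading
 * partial word, whole words, and a trailing partial word.  E.g. for
 * offset=1 len=2, the word at offset 0 is read and bytes 1-2 of it
 * are copied out.
 */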
6699 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6700 {
6701         struct tg3 *tp = netdev_priv(dev);
6702         int ret;
6703         u8  *pd;
6704         u32 i, offset, len, val, b_offset, b_count;
6705
6706         offset = eeprom->offset;
6707         len = eeprom->len;
6708         eeprom->len = 0;
6709
6710         eeprom->magic = TG3_EEPROM_MAGIC;
6711
6712         if (offset & 3) {
6713                 /* adjustments to start on required 4 byte boundary */
6714                 b_offset = offset & 3;
6715                 b_count = 4 - b_offset;
6716                 if (b_count > len) {
6717                         /* i.e. offset=1 len=2 */
6718                         b_count = len;
6719                 }
6720                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6721                 if (ret)
6722                         return ret;
6723                 val = cpu_to_le32(val);
6724                 memcpy(data, ((char*)&val) + b_offset, b_count);
6725                 len -= b_count;
6726                 offset += b_count;
6727                 eeprom->len += b_count;
6728         }
6729
6730         /* read bytes up to the last 4 byte boundary */
6731         pd = &data[eeprom->len];
6732         for (i = 0; i < (len - (len & 3)); i += 4) {
6733                 ret = tg3_nvram_read(tp, offset + i, &val);
6734                 if (ret) {
6735                         eeprom->len += i;
6736                         return ret;
6737                 }
6738                 val = cpu_to_le32(val);
6739                 memcpy(pd + i, &val, 4);
6740         }
6741         eeprom->len += i;
6742
6743         if (len & 3) {
6744                 /* read last bytes not ending on 4 byte boundary */
6745                 pd = &data[eeprom->len];
6746                 b_count = len & 3;
6747                 b_offset = offset + len - b_count;
6748                 ret = tg3_nvram_read(tp, b_offset, &val);
6749                 if (ret)
6750                         return ret;
6751                 val = cpu_to_le32(val);
6752                 memcpy(pd, ((char*)&val), b_count);
6753                 eeprom->len += b_count;
6754         }
6755         return 0;
6756 }
6757
6758 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6759
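/* ethtool EEPROM write.  NVRAM writes are whole 32-bit words, so an
 * unaligned request is widened to word boundaries and the surrounding
 * bytes are preserved by reading them first.  For example, offset=5
 * len=6 becomes a write of 8 bytes at offset 4: the word at offset 4
 * and the word at offset 8 are read into a temporary buffer, the
 * caller's 6 bytes are merged in at byte offset 1, and the whole
 * buffer is written back.
 */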
6760 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6761 {
6762         struct tg3 *tp = netdev_priv(dev);
6763         int ret;
6764         u32 offset, len, b_offset, odd_len, start, end;
6765         u8 *buf;
6766
6767         if (eeprom->magic != TG3_EEPROM_MAGIC)
6768                 return -EINVAL;
6769
6770         offset = eeprom->offset;
6771         len = eeprom->len;
6772
6773         if ((b_offset = (offset & 3))) {
6774                 /* adjustments to start on required 4 byte boundary */
6775                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6776                 if (ret)
6777                         return ret;
6778                 start = cpu_to_le32(start);
6779                 len += b_offset;
6780                 offset &= ~3;
6781                 if (len < 4)
6782                         len = 4;
6783         }
6784
6785         odd_len = 0;
6786         if (len & 3) {
6787                 /* adjustments to end on required 4 byte boundary */
6788                 odd_len = 1;
6789                 len = (len + 3) & ~3;
6790                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6791                 if (ret)
6792                         return ret;
6793                 end = cpu_to_le32(end);
6794         }
6795
6796         buf = data;
6797         if (b_offset || odd_len) {
6798                 buf = kmalloc(len, GFP_KERNEL);
6799                 if (!buf)
6800                         return -ENOMEM;
6801                 if (b_offset)
6802                         memcpy(buf, &start, 4);
6803                 if (odd_len)
6804                         memcpy(buf+len-4, &end, 4);
6805                 memcpy(buf + b_offset, data, eeprom->len);
6806         }
6807
6808         ret = tg3_nvram_write_block(tp, offset, len, buf);
6809
6810         if (buf != data)
6811                 kfree(buf);
6812
6813         return ret;
6814 }
6815
6816 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6817 {
6818         struct tg3 *tp = netdev_priv(dev);
6819   
6820         cmd->supported = (SUPPORTED_Autoneg);
6821
6822         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6823                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6824                                    SUPPORTED_1000baseT_Full);
6825
6826         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6827                 cmd->supported |= (SUPPORTED_100baseT_Half |
6828                                   SUPPORTED_100baseT_Full |
6829                                   SUPPORTED_10baseT_Half |
6830                                   SUPPORTED_10baseT_Full |
6831                                   SUPPORTED_MII);
6832         else
6833                 cmd->supported |= SUPPORTED_FIBRE;
6834   
6835         cmd->advertising = tp->link_config.advertising;
6836         if (netif_running(dev)) {
6837                 cmd->speed = tp->link_config.active_speed;
6838                 cmd->duplex = tp->link_config.active_duplex;
6839         }
6840         cmd->port = 0;
6841         cmd->phy_address = PHY_ADDR;
6842         cmd->transceiver = 0;
6843         cmd->autoneg = tp->link_config.autoneg;
6844         cmd->maxtxpkt = 0;
6845         cmd->maxrxpkt = 0;
6846         return 0;
6847 }
6848   
6849 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6850 {
6851         struct tg3 *tp = netdev_priv(dev);
6852   
6853         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6854                 /* These are the only advertisement bits allowed. */
6855                 if (cmd->autoneg == AUTONEG_ENABLE &&
6856                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6857                                           ADVERTISED_1000baseT_Full |
6858                                           ADVERTISED_Autoneg |
6859                                           ADVERTISED_FIBRE)))
6860                         return -EINVAL;
6861         }
6862
6863         spin_lock_irq(&tp->lock);
6864         spin_lock(&tp->tx_lock);
6865
6866         tp->link_config.autoneg = cmd->autoneg;
6867         if (cmd->autoneg == AUTONEG_ENABLE) {
6868                 tp->link_config.advertising = cmd->advertising;
6869                 tp->link_config.speed = SPEED_INVALID;
6870                 tp->link_config.duplex = DUPLEX_INVALID;
6871         } else {
6872                 tp->link_config.advertising = 0;
6873                 tp->link_config.speed = cmd->speed;
6874                 tp->link_config.duplex = cmd->duplex;
6875         }
6876   
6877         if (netif_running(dev))
6878                 tg3_setup_phy(tp, 1);
6879
6880         spin_unlock(&tp->tx_lock);
6881         spin_unlock_irq(&tp->lock);
6882   
6883         return 0;
6884 }
6885   
6886 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6887 {
6888         struct tg3 *tp = netdev_priv(dev);
6889   
6890         strcpy(info->driver, DRV_MODULE_NAME);
6891         strcpy(info->version, DRV_MODULE_VERSION);
6892         strcpy(info->bus_info, pci_name(tp->pdev));
6893 }
6894   
6895 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6896 {
6897         struct tg3 *tp = netdev_priv(dev);
6898   
6899         wol->supported = WAKE_MAGIC;
6900         wol->wolopts = 0;
6901         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6902                 wol->wolopts = WAKE_MAGIC;
6903         memset(&wol->sopass, 0, sizeof(wol->sopass));
6904 }
6905   
6906 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6907 {
6908         struct tg3 *tp = netdev_priv(dev);
6909   
6910         if (wol->wolopts & ~WAKE_MAGIC)
6911                 return -EINVAL;
6912         if ((wol->wolopts & WAKE_MAGIC) &&
6913             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6914             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6915                 return -EINVAL;
6916   
6917         spin_lock_irq(&tp->lock);
6918         if (wol->wolopts & WAKE_MAGIC)
6919                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6920         else
6921                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6922         spin_unlock_irq(&tp->lock);
6923   
6924         return 0;
6925 }
6926   
6927 static u32 tg3_get_msglevel(struct net_device *dev)
6928 {
6929         struct tg3 *tp = netdev_priv(dev);
6930         return tp->msg_enable;
6931 }
6932   
6933 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6934 {
6935         struct tg3 *tp = netdev_priv(dev);
6936         tp->msg_enable = value;
6937 }
6938   
6939 #if TG3_TSO_SUPPORT != 0
6940 static int tg3_set_tso(struct net_device *dev, u32 value)
6941 {
6942         struct tg3 *tp = netdev_priv(dev);
6943
6944         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6945                 if (value)
6946                         return -EINVAL;
6947                 return 0;
6948         }
6949         return ethtool_op_set_tso(dev, value);
6950 }
6951 #endif
6952   
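/* Restart autonegotiation.  Note that MII_BMCR is read twice: the
 * first result is discarded and only the second, error-checked read
 * gates the BMCR_ANRESTART write (presumably to flush a stale latched
 * value from the PHY).
 */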
6953 static int tg3_nway_reset(struct net_device *dev)
6954 {
6955         struct tg3 *tp = netdev_priv(dev);
6956         u32 bmcr;
6957         int r;
6958   
6959         if (!netif_running(dev))
6960                 return -EAGAIN;
6961
6962         spin_lock_irq(&tp->lock);
6963         r = -EINVAL;
6964         tg3_readphy(tp, MII_BMCR, &bmcr);
6965         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6966             (bmcr & BMCR_ANENABLE)) {
6967                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6968                 r = 0;
6969         }
6970         spin_unlock_irq(&tp->lock);
6971   
6972         return r;
6973 }
6974   
6975 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6976 {
6977         struct tg3 *tp = netdev_priv(dev);
6978   
6979         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6980         ering->rx_mini_max_pending = 0;
6981         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6982
6983         ering->rx_pending = tp->rx_pending;
6984         ering->rx_mini_pending = 0;
6985         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6986         ering->tx_pending = tp->tx_pending;
6987 }
6988   
6989 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6990 {
6991         struct tg3 *tp = netdev_priv(dev);
6992   
6993         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6994             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6995             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6996                 return -EINVAL;
6997   
6998         if (netif_running(dev))
6999                 tg3_netif_stop(tp);
7000
7001         spin_lock_irq(&tp->lock);
7002         spin_lock(&tp->tx_lock);
7003   
7004         tp->rx_pending = ering->rx_pending;
7005
7006         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7007             tp->rx_pending > 63)
7008                 tp->rx_pending = 63;
7009         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7010         tp->tx_pending = ering->tx_pending;
7011
7012         if (netif_running(dev)) {
7013                 tg3_halt(tp);
7014                 tg3_init_hw(tp);
7015                 tg3_netif_start(tp);
7016         }
7017
7018         spin_unlock(&tp->tx_lock);
7019         spin_unlock_irq(&tp->lock);
7020   
7021         return 0;
7022 }
7023   
7024 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7025 {
7026         struct tg3 *tp = netdev_priv(dev);
7027   
7028         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7029         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7030         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7031 }
7032   
7033 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7034 {
7035         struct tg3 *tp = netdev_priv(dev);
7036   
7037         if (netif_running(dev))
7038                 tg3_netif_stop(tp);
7039
7040         spin_lock_irq(&tp->lock);
7041         spin_lock(&tp->tx_lock);
7042         if (epause->autoneg)
7043                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7044         else
7045                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7046         if (epause->rx_pause)
7047                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7048         else
7049                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7050         if (epause->tx_pause)
7051                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7052         else
7053                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7054
7055         if (netif_running(dev)) {
7056                 tg3_halt(tp);
7057                 tg3_init_hw(tp);
7058                 tg3_netif_start(tp);
7059         }
7060         spin_unlock(&tp->tx_lock);
7061         spin_unlock_irq(&tp->lock);
7062   
7063         return 0;
7064 }
7065   
7066 static u32 tg3_get_rx_csum(struct net_device *dev)
7067 {
7068         struct tg3 *tp = netdev_priv(dev);
7069         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7070 }
7071   
7072 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7073 {
7074         struct tg3 *tp = netdev_priv(dev);
7075   
7076         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7077                 if (data != 0)
7078                         return -EINVAL;
7079                 return 0;
7080         }
7081   
7082         spin_lock_irq(&tp->lock);
7083         if (data)
7084                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7085         else
7086                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7087         spin_unlock_irq(&tp->lock);
7088   
7089         return 0;
7090 }
7091   
7092 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7093 {
7094         struct tg3 *tp = netdev_priv(dev);
7095   
7096         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7097                 if (data != 0)
7098                         return -EINVAL;
7099                 return 0;
7100         }
7101   
7102         if (data)
7103                 dev->features |= NETIF_F_IP_CSUM;
7104         else
7105                 dev->features &= ~NETIF_F_IP_CSUM;
7106
7107         return 0;
7108 }
7109
7110 static int tg3_get_stats_count (struct net_device *dev)
7111 {
7112         return TG3_NUM_STATS;
7113 }
7114
7115 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7116 {
7117         switch (stringset) {
7118         case ETH_SS_STATS:
7119                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7120                 break;
7121         default:
7122                 WARN_ON(1);     /* we need a WARN() */
7123                 break;
7124         }
7125 }
7126
7127 static void tg3_get_ethtool_stats (struct net_device *dev,
7128                                    struct ethtool_stats *estats, u64 *tmp_stats)
7129 {
7130         struct tg3 *tp = netdev_priv(dev);
7131         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7132 }
7133
7134 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7135 {
7136         struct mii_ioctl_data *data = if_mii(ifr);
7137         struct tg3 *tp = netdev_priv(dev);
7138         int err;
7139
7140         switch(cmd) {
7141         case SIOCGMIIPHY:
7142                 data->phy_id = PHY_ADDR;
7143
7144                 /* fallthru */
7145         case SIOCGMIIREG: {
7146                 u32 mii_regval;
7147
7148                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7149                         break;                  /* We have no PHY */
7150
7151                 spin_lock_irq(&tp->lock);
7152                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7153                 spin_unlock_irq(&tp->lock);
7154
7155                 data->val_out = mii_regval;
7156
7157                 return err;
7158         }
7159
7160         case SIOCSMIIREG:
7161                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7162                         break;                  /* We have no PHY */
7163
7164                 if (!capable(CAP_NET_ADMIN))
7165                         return -EPERM;
7166
7167                 spin_lock_irq(&tp->lock);
7168                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7169                 spin_unlock_irq(&tp->lock);
7170
7171                 return err;
7172
7173         default:
7174                 /* do nothing */
7175                 break;
7176         }
7177         return -EOPNOTSUPP;
7178 }
7179
7180 #if TG3_VLAN_TAG_USED
7181 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7182 {
7183         struct tg3 *tp = netdev_priv(dev);
7184
7185         spin_lock_irq(&tp->lock);
7186         spin_lock(&tp->tx_lock);
7187
7188         tp->vlgrp = grp;
7189
7190         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7191         __tg3_set_rx_mode(dev);
7192
7193         spin_unlock(&tp->tx_lock);
7194         spin_unlock_irq(&tp->lock);
7195 }
7196
7197 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7198 {
7199         struct tg3 *tp = netdev_priv(dev);
7200
7201         spin_lock_irq(&tp->lock);
7202         spin_lock(&tp->tx_lock);
7203         if (tp->vlgrp)
7204                 tp->vlgrp->vlan_devices[vid] = NULL;
7205         spin_unlock(&tp->tx_lock);
7206         spin_unlock_irq(&tp->lock);
7207 }
7208 #endif
7209
7210 static struct ethtool_ops tg3_ethtool_ops = {
7211         .get_settings           = tg3_get_settings,
7212         .set_settings           = tg3_set_settings,
7213         .get_drvinfo            = tg3_get_drvinfo,
7214         .get_regs_len           = tg3_get_regs_len,
7215         .get_regs               = tg3_get_regs,
7216         .get_wol                = tg3_get_wol,
7217         .set_wol                = tg3_set_wol,
7218         .get_msglevel           = tg3_get_msglevel,
7219         .set_msglevel           = tg3_set_msglevel,
7220         .nway_reset             = tg3_nway_reset,
7221         .get_link               = ethtool_op_get_link,
7222         .get_eeprom_len         = tg3_get_eeprom_len,
7223         .get_eeprom             = tg3_get_eeprom,
7224         .set_eeprom             = tg3_set_eeprom,
7225         .get_ringparam          = tg3_get_ringparam,
7226         .set_ringparam          = tg3_set_ringparam,
7227         .get_pauseparam         = tg3_get_pauseparam,
7228         .set_pauseparam         = tg3_set_pauseparam,
7229         .get_rx_csum            = tg3_get_rx_csum,
7230         .set_rx_csum            = tg3_set_rx_csum,
7231         .get_tx_csum            = ethtool_op_get_tx_csum,
7232         .set_tx_csum            = tg3_set_tx_csum,
7233         .get_sg                 = ethtool_op_get_sg,
7234         .set_sg                 = ethtool_op_set_sg,
7235 #if TG3_TSO_SUPPORT != 0
7236         .get_tso                = ethtool_op_get_tso,
7237         .set_tso                = tg3_set_tso,
7238 #endif
7239         .get_strings            = tg3_get_strings,
7240         .get_stats_count        = tg3_get_stats_count,
7241         .get_ethtool_stats      = tg3_get_ethtool_stats,
7242 };
7243
7244 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7245 {
7246         u32 cursize, val;
7247
7248         tp->nvram_size = EEPROM_CHIP_SIZE;
7249
7250         if (tg3_nvram_read(tp, 0, &val) != 0)
7251                 return;
7252
7253         if (swab32(val) != TG3_EEPROM_MAGIC)
7254                 return;
7255
7256         /*
7257          * Size the chip by reading offsets at increasing powers of two.
7258          * When we encounter our validation signature, we know the addressing
7259          * has wrapped around, and thus have our chip size.
7260          */
7261         cursize = 0x800;
7262
7263         while (cursize < tp->nvram_size) {
7264                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7265                         return;
7266
7267                 if (swab32(val) == TG3_EEPROM_MAGIC)
7268                         break;
7269
7270                 cursize <<= 1;
7271         }
7272
7273         tp->nvram_size = cursize;
7274 }
7275                 
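/* NVRAM size discovery: the word at offset 0xf0 holds the size in KB
 * in its upper 16 bits; if that word cannot be read or is zero, fall
 * back to the 128 kB (0x20000) default.
 */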
7276 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7277 {
7278         u32 val;
7279
7280         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7281                 if (val != 0) {
7282                         tp->nvram_size = (val >> 16) * 1024;
7283                         return;
7284                 }
7285         }
7286         tp->nvram_size = 0x20000;
7287 }
7288
7289 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7290 {
7291         u32 nvcfg1;
7292
7293         nvcfg1 = tr32(NVRAM_CFG1);
7294         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7295                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7296         }
7297         else {
7298                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7299                 tw32(NVRAM_CFG1, nvcfg1);
7300         }
7301
7302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7303                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7304                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7305                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7306                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7307                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7308                                 break;
7309                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7310                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7311                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7312                                 break;
7313                         case FLASH_VENDOR_ATMEL_EEPROM:
7314                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7315                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7316                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7317                                 break;
7318                         case FLASH_VENDOR_ST:
7319                                 tp->nvram_jedecnum = JEDEC_ST;
7320                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7321                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7322                                 break;
7323                         case FLASH_VENDOR_SAIFUN:
7324                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7325                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7326                                 break;
7327                         case FLASH_VENDOR_SST_SMALL:
7328                         case FLASH_VENDOR_SST_LARGE:
7329                                 tp->nvram_jedecnum = JEDEC_SST;
7330                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7331                                 break;
7332                 }
7333         }
7334         else {
7335                 tp->nvram_jedecnum = JEDEC_ATMEL;
7336                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7337                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7338         }
7339 }
7340
7341 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
7342 {
7343         u32 nvcfg1;
7344
7345         nvcfg1 = tr32(NVRAM_CFG1);
7346
7347         /* NVRAM protection for TPM */
7348         if (nvcfg1 & (1 << 27))
7349                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
7350
7351         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
7352                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
7353                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
7354                         tp->nvram_jedecnum = JEDEC_ATMEL;
7355                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7356                         break;
7357                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
7358                         tp->nvram_jedecnum = JEDEC_ATMEL;
7359                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7360                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7361                         break;
7362                 case FLASH_5752VENDOR_ST_M45PE10:
7363                 case FLASH_5752VENDOR_ST_M45PE20:
7364                 case FLASH_5752VENDOR_ST_M45PE40:
7365                         tp->nvram_jedecnum = JEDEC_ST;
7366                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7367                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
7368                         break;
7369         }
7370
7371         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
7372                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
7373                         case FLASH_5752PAGE_SIZE_256:
7374                                 tp->nvram_pagesize = 256;
7375                                 break;
7376                         case FLASH_5752PAGE_SIZE_512:
7377                                 tp->nvram_pagesize = 512;
7378                                 break;
7379                         case FLASH_5752PAGE_SIZE_1K:
7380                                 tp->nvram_pagesize = 1024;
7381                                 break;
7382                         case FLASH_5752PAGE_SIZE_2K:
7383                                 tp->nvram_pagesize = 2048;
7384                                 break;
7385                         case FLASH_5752PAGE_SIZE_4K:
7386                                 tp->nvram_pagesize = 4096;
7387                                 break;
7388                         case FLASH_5752PAGE_SIZE_264:
7389                                 tp->nvram_pagesize = 264;
7390                                 break;
7391                 }
7392         }
7393         else {
7394                 /* For eeprom, set pagesize to maximum eeprom size */
7395                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7396
7397                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7398                 tw32(NVRAM_CFG1, nvcfg1);
7399         }
7400 }
7401
7402 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7403 static void __devinit tg3_nvram_init(struct tg3 *tp)
7404 {
7405         int j;
7406
7407         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7408                 return;
7409
7410         tw32_f(GRC_EEPROM_ADDR,
7411              (EEPROM_ADDR_FSM_RESET |
7412               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7413                EEPROM_ADDR_CLKPERD_SHIFT)));
7414
7415         /* XXX schedule_timeout() ... */
7416         for (j = 0; j < 100; j++)
7417                 udelay(10);
7418
7419         /* Enable seeprom accesses. */
7420         tw32_f(GRC_LOCAL_CTRL,
7421              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7422         udelay(100);
7423
7424         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7425             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7426                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7427
7428                 tg3_enable_nvram_access(tp);
7429
7430                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7431                         tg3_get_5752_nvram_info(tp);
7432                 else
7433                         tg3_get_nvram_info(tp);
7434
7435                 tg3_get_nvram_size(tp);
7436
7437                 tg3_disable_nvram_access(tp);
7438
7439         } else {
7440                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7441
7442                 tg3_get_eeprom_size(tp);
7443         }
7444 }
7445
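/* Legacy serial-EEPROM read path, used when TG3_FLAG_NVRAM is not set
 * (5700/5701): start a word read through GRC_EEPROM_ADDR, poll for
 * EEPROM_ADDR_COMPLETE for up to 10000 iterations of 100 us (roughly
 * one second), then fetch the word from GRC_EEPROM_DATA.
 */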
7446 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7447                                         u32 offset, u32 *val)
7448 {
7449         u32 tmp;
7450         int i;
7451
7452         if (offset > EEPROM_ADDR_ADDR_MASK ||
7453             (offset % 4) != 0)
7454                 return -EINVAL;
7455
7456         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7457                                         EEPROM_ADDR_DEVID_MASK |
7458                                         EEPROM_ADDR_READ);
7459         tw32(GRC_EEPROM_ADDR,
7460              tmp |
7461              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7462              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7463               EEPROM_ADDR_ADDR_MASK) |
7464              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7465
7466         for (i = 0; i < 10000; i++) {
7467                 tmp = tr32(GRC_EEPROM_ADDR);
7468
7469                 if (tmp & EEPROM_ADDR_COMPLETE)
7470                         break;
7471                 udelay(100);
7472         }
7473         if (!(tmp & EEPROM_ADDR_COMPLETE))
7474                 return -EBUSY;
7475
7476         *val = tr32(GRC_EEPROM_DATA);
7477         return 0;
7478 }
7479
7480 #define NVRAM_CMD_TIMEOUT 10000
7481
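/* Issue one NVRAM command and poll NVRAM_CMD for the DONE bit, up to
 * NVRAM_CMD_TIMEOUT iterations of 10 us (about 100 ms) before giving
 * up with -EBUSY.
 */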
7482 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7483 {
7484         int i;
7485
7486         tw32(NVRAM_CMD, nvram_cmd);
7487         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7488                 udelay(10);
7489                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7490                         udelay(10);
7491                         break;
7492                 }
7493         }
7494         if (i == NVRAM_CMD_TIMEOUT) {
7495                 return -EBUSY;
7496         }
7497         return 0;
7498 }
7499
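/* Read one 32-bit word from NVRAM.  Buffered Atmel DataFlash parts
 * (AT45DB011B style) are not byte-linear, so the flat offset is
 * rebuilt as a page number plus a byte offset within the page before
 * being written to NVRAM_ADDR.  Assuming the 264-byte page size and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9, a linear offset of 600 becomes
 * page 2, byte 72, i.e. (2 << 9) + 72 = 1096.  The data word is
 * byte-swapped before being handed back to the caller.
 */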
7500 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7501 {
7502         int ret;
7503
7504         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7505                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7506                 return -EINVAL;
7507         }
7508
7509         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7510                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7511
7512         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7513                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7514                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7515
7516                 offset = ((offset / tp->nvram_pagesize) <<
7517                           ATMEL_AT45DB0X1B_PAGE_POS) +
7518                         (offset % tp->nvram_pagesize);
7519         }
7520
7521         if (offset > NVRAM_ADDR_MSK)
7522                 return -EINVAL;
7523
7524         tg3_nvram_lock(tp);
7525
7526         tg3_enable_nvram_access(tp);
7527
7528         tw32(NVRAM_ADDR, offset);
7529         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7530                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7531
7532         if (ret == 0)
7533                 *val = swab32(tr32(NVRAM_RDDATA));
7534
7535         tg3_nvram_unlock(tp);
7536
7537         tg3_disable_nvram_access(tp);
7538
7539         return ret;
7540 }
7541
7542 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7543                                     u32 offset, u32 len, u8 *buf)
7544 {
7545         int i, j, rc = 0;
7546         u32 val;
7547
7548         for (i = 0; i < len; i += 4) {
7549                 u32 addr, data;
7550
7551                 addr = offset + i;
7552
7553                 memcpy(&data, buf + i, 4);
7554
7555                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7556
7557                 val = tr32(GRC_EEPROM_ADDR);
7558                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7559
7560                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7561                         EEPROM_ADDR_READ);
7562                 tw32(GRC_EEPROM_ADDR, val |
7563                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7564                         (addr & EEPROM_ADDR_ADDR_MASK) |
7565                         EEPROM_ADDR_START |
7566                         EEPROM_ADDR_WRITE);
7567                 
7568                 for (j = 0; j < 10000; j++) {
7569                         val = tr32(GRC_EEPROM_ADDR);
7570
7571                         if (val & EEPROM_ADDR_COMPLETE)
7572                                 break;
7573                         udelay(100);
7574                 }
7575                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7576                         rc = -EBUSY;
7577                         break;
7578                 }
7579         }
7580
7581         return rc;
7582 }
7583
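/* Write path for parts without an internal page buffer: each flash
 * page touched by the request is handled read-modify-write.  The page
 * is read into a scratch buffer, the new bytes are merged in, a
 * write-enable is issued, the page is erased, another write-enable is
 * issued, and the page is programmed back one 32-bit word at a time
 * with NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on the
 * last.  A final write-disable (WRDI) command ends the sequence.
 */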
7584 /* offset and length are dword aligned */
7585 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7586                 u8 *buf)
7587 {
7588         int ret = 0;
7589         u32 pagesize = tp->nvram_pagesize;
7590         u32 pagemask = pagesize - 1;
7591         u32 nvram_cmd;
7592         u8 *tmp;
7593
7594         tmp = kmalloc(pagesize, GFP_KERNEL);
7595         if (tmp == NULL)
7596                 return -ENOMEM;
7597
7598         while (len) {
7599                 int j;
7600                 u32 phy_addr, page_off, size;
7601
7602                 phy_addr = offset & ~pagemask;
7603         
7604                 for (j = 0; j < pagesize; j += 4) {
7605                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7606                                                 (u32 *) (tmp + j))))
7607                                 break;
7608                 }
7609                 if (ret)
7610                         break;
7611
7612                 page_off = offset & pagemask;
7613                 size = pagesize;
7614                 if (len < size)
7615                         size = len;
7616
7617                 len -= size;
7618
7619                 memcpy(tmp + page_off, buf, size);
7620
7621                 offset = offset + (pagesize - page_off);
7622
7623                 tg3_enable_nvram_access(tp);
7624
7625                 /*
7626                  * Before we can erase the flash page, we need
7627                  * to issue a special "write enable" command.
7628                  */
7629                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7630
7631                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7632                         break;
7633
7634                 /* Erase the target page */
7635                 tw32(NVRAM_ADDR, phy_addr);
7636
7637                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7638                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7639
7640                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7641                         break;
7642
7643                 /* Issue another write enable to start the write. */
7644                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7645
7646                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7647                         break;
7648
7649                 for (j = 0; j < pagesize; j += 4) {
7650                         u32 data;
7651
7652                         data = *((u32 *) (tmp + j));
7653                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7654
7655                         tw32(NVRAM_ADDR, phy_addr + j);
7656
7657                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7658                                 NVRAM_CMD_WR;
7659
7660                         if (j == 0)
7661                                 nvram_cmd |= NVRAM_CMD_FIRST;
7662                         else if (j == (pagesize - 4))
7663                                 nvram_cmd |= NVRAM_CMD_LAST;
7664
7665                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7666                                 break;
7667                 }
7668                 if (ret)
7669                         break;
7670         }
7671
7672         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7673         tg3_nvram_exec_cmd(tp, nvram_cmd);
7674
7675         kfree(tmp);
7676
7677         return ret;
7678 }
7679
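/* Write path for buffered flash and EEPROM-like parts: program one
 * 32-bit word per command, tagging the first word of a page (or of
 * the request) with NVRAM_CMD_FIRST and the last word of a page or of
 * the request with NVRAM_CMD_LAST.  ST parts additionally need a
 * write-enable command before each page's first word, and plain
 * EEPROMs get FIRST|LAST on every word since they are always written
 * one complete word at a time.
 */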
7680 /* offset and length are dword aligned */
7681 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7682                 u8 *buf)
7683 {
7684         int i, ret = 0;
7685
7686         for (i = 0; i < len; i += 4, offset += 4) {
7687                 u32 data, page_off, phy_addr, nvram_cmd;
7688
7689                 memcpy(&data, buf + i, 4);
7690                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7691
7692                 page_off = offset % tp->nvram_pagesize;
7693
7694                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7695                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7696
7697                         phy_addr = ((offset / tp->nvram_pagesize) <<
7698                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7699                 }
7700                 else {
7701                         phy_addr = offset;
7702                 }
7703
7704                 tw32(NVRAM_ADDR, phy_addr);
7705
7706                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7707
7708                 if ((page_off == 0) || (i == 0))
7709                         nvram_cmd |= NVRAM_CMD_FIRST;
7710                 else if (page_off == (tp->nvram_pagesize - 4))
7711                         nvram_cmd |= NVRAM_CMD_LAST;
7712
7713                 if (i == (len - 4))
7714                         nvram_cmd |= NVRAM_CMD_LAST;
7715
7716                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7717                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7718
7719                         if ((ret = tg3_nvram_exec_cmd(tp,
7720                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7721                                 NVRAM_CMD_DONE)))
7722
7723                                 break;
7724                 }
7725                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7726                         /* We always do complete word writes to eeprom. */
7727                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7728                 }
7729
7730                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7731                         break;
7732         }
7733         return ret;
7734 }
7735
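/* Top-level NVRAM write: clear GRC_LCLCTRL_GPIO_OUTPUT1 for the
 * duration of the write when the EEPROM write-protect flag is set
 * (dropping the write-protect line), pick the eeprom/buffered/
 * unbuffered helper that matches the part, and bracket the actual
 * programming with GRC_MODE_NVRAM_WR_ENABLE plus the NVRAM lock and
 * access enable.
 */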
7736 /* offset and length are dword aligned */
7737 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7738 {
7739         int ret;
7740
7741         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7742                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7743                 return -EINVAL;
7744         }
7745
7746         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7747                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
7748                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
7749                 udelay(40);
7750         }
7751
7752         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7753                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7754         }
7755         else {
7756                 u32 grc_mode;
7757
7758                 tg3_nvram_lock(tp);
7759
7760                 tg3_enable_nvram_access(tp);
7761                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
7762                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
7763                         tw32(NVRAM_WRITE1, 0x406);
7764
7765                 grc_mode = tr32(GRC_MODE);
7766                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7767
7768                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7769                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7770
7771                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7772                                 buf);
7773                 }
7774                 else {
7775                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7776                                 buf);
7777                 }
7778
7779                 grc_mode = tr32(GRC_MODE);
7780                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7781
7782                 tg3_disable_nvram_access(tp);
7783                 tg3_nvram_unlock(tp);
7784         }
7785
7786         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7787                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7788                 udelay(40);
7789         }
7790
7791         return ret;
7792 }
7793
7794 struct subsys_tbl_ent {
7795         u16 subsys_vendor, subsys_devid;
7796         u32 phy_id;
7797 };
7798
7799 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7800         /* Broadcom boards. */
7801         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7802         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7803         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7804         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7805         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7806         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7807         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7808         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7809         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7810         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7811         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7812
7813         /* 3com boards. */
7814         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7815         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7816         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7817         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7818         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7819
7820         /* DELL boards. */
7821         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7822         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7823         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7824         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7825
7826         /* Compaq boards. */
7827         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7828         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7829         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7830         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7831         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7832
7833         /* IBM boards. */
7834         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7835 };
7836
7837 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7838 {
7839         int i;
7840
7841         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7842                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7843                      tp->pdev->subsystem_vendor) &&
7844                     (subsys_id_to_phy_id[i].subsys_devid ==
7845                      tp->pdev->subsystem_device))
7846                         return &subsys_id_to_phy_id[i];
7847         }
7848         return NULL;
7849 }
7850
7851 /* Since this function may be called in D3-hot power state during
7852  * tg3_init_one(), only config cycles are allowed.
7853  */
7854 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
7855 {
7856         u32 val;
7857
7858         /* Make sure register accesses (indirect or otherwise)
7859          * will function correctly.
7860          */
7861         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7862                                tp->misc_host_ctrl);
7863
7864         tp->phy_id = PHY_ID_INVALID;
7865         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7866
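             /* The PHY id and LED mode defaults above are only overridden
              * when a valid signature is found in NIC SRAM; otherwise the
              * configuration area read below is not trusted.
              */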
7867         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7868         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7869                 u32 nic_cfg, led_cfg;
7870                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
7871                 int eeprom_phy_serdes = 0;
7872
7873                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7874                 tp->nic_sram_data_cfg = nic_cfg;
7875
7876                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7877                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7878                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7879                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7880                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7881                     (ver > 0) && (ver < 0x100))
7882                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7883
7884                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7885                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7886                         eeprom_phy_serdes = 1;
7887
7888                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7889                 if (nic_phy_id != 0) {
7890                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7891                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7892
7893                         eeprom_phy_id  = (id1 >> 16) << 10;
7894                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7895                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7896                 } else
7897                         eeprom_phy_id = 0;
7898
7899                 tp->phy_id = eeprom_phy_id;
7900                 if (eeprom_phy_serdes)
7901                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7902
7903                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7904                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7905                                     SHASTA_EXT_LED_MODE_MASK);
7906                 else
7907                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7908
7909                 switch (led_cfg) {
7910                 default:
7911                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7912                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7913                         break;
7914
7915                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7916                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7917                         break;
7918
7919                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7920                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7921                         break;
7922
7923                 case SHASTA_EXT_LED_SHARED:
7924                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7925                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7926                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7927                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7928                                                  LED_CTRL_MODE_PHY_2);
7929                         break;
7930
7931                 case SHASTA_EXT_LED_MAC:
7932                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7933                         break;
7934
7935                 case SHASTA_EXT_LED_COMBO:
7936                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7937                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7938                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7939                                                  LED_CTRL_MODE_PHY_2);
7940                         break;
7941
7942                 }
7943
7944                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7945                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7946                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7947                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7948
7949                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7950                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7951                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7952                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7953
7954                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7955                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7956                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7957                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7958                 }
7959                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7960                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7961
7962                 if (cfg2 & (1 << 17))
7963                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7964
7965                 /* Serdes signal pre-emphasis in register 0x590 is set
7966                  * by the bootcode if bit 18 is set. */
7967                 if (cfg2 & (1 << 18))
7968                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7969         }
7970 }
7971
7972 static int __devinit tg3_phy_probe(struct tg3 *tp)
7973 {
7974         u32 hw_phy_id_1, hw_phy_id_2;
7975         u32 hw_phy_id, hw_phy_id_masked;
7976         int err;
7977
7978         /* Reading the PHY ID register can conflict with ASF
7979          * firmware access to the PHY hardware.
7980          */
7981         err = 0;
7982         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7983                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7984         } else {
7985                 /* Now read the physical PHY_ID from the chip and verify
7986                  * that it is sane.  If it doesn't look good, we fall back
7987                  * first to the PHY_ID found in the eeprom area and, failing
7988                  * that, to the hard-coded subsystem ID table.
7989                  */
7990                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7991                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7992
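                     /* Fold the two MII ID registers into the driver's single
                      * 32-bit PHY id layout so the result can be checked with
                      * PHY_ID_MASK and KNOWN_PHY_ID below.
                      */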
7993                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7994                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7995                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7996
7997                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7998         }
7999
8000         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8001                 tp->phy_id = hw_phy_id;
8002                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8003                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8004         } else {
8005                 if (tp->phy_id != PHY_ID_INVALID) {
8006                         /* Do nothing, phy ID already set up in
8007                          * tg3_get_eeprom_hw_cfg().
8008                          */
8009                 } else {
8010                         struct subsys_tbl_ent *p;
8011
8012                         /* No eeprom signature?  Try the hardcoded
8013                          * subsys device table.
8014                          */
8015                         p = lookup_by_subsys(tp);
8016                         if (!p)
8017                                 return -ENODEV;
8018
8019                         tp->phy_id = p->phy_id;
8020                         if (!tp->phy_id ||
8021                             tp->phy_id == PHY_ID_BCM8002)
8022                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8023                 }
8024         }
8025
8026         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8027             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8028                 u32 bmsr, adv_reg, tg3_ctrl;
8029
8030                 tg3_readphy(tp, MII_BMSR, &bmsr);
8031                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8032                     (bmsr & BMSR_LSTATUS))
8033                         goto skip_phy_reset;
8034
8035                 err = tg3_phy_reset(tp);
8036                 if (err)
8037                         return err;
8038
8039                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8040                            ADVERTISE_100HALF | ADVERTISE_100FULL |
8041                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8042                 tg3_ctrl = 0;
8043                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8044                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8045                                     MII_TG3_CTRL_ADV_1000_FULL);
8046                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8047                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8048                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8049                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
8050                 }
8051
8052                 if (!tg3_copper_is_advertising_all(tp)) {
8053                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8054
8055                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8056                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8057
8058                         tg3_writephy(tp, MII_BMCR,
8059                                      BMCR_ANENABLE | BMCR_ANRESTART);
8060                 }
8061                 tg3_phy_set_wirespeed(tp);
8062
8063                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8064                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8065                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8066         }
8067
8068 skip_phy_reset:
8069         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8070                 err = tg3_init_5401phy_dsp(tp);
8071                 if (err)
8072                         return err;
8073         }
8074
8075         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8076                 err = tg3_init_5401phy_dsp(tp);
8077         }
8078
8079         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8080                 tp->link_config.advertising =
8081                         (ADVERTISED_1000baseT_Half |
8082                          ADVERTISED_1000baseT_Full |
8083                          ADVERTISED_Autoneg |
8084                          ADVERTISED_FIBRE);
8085         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8086                 tp->link_config.advertising &=
8087                         ~(ADVERTISED_1000baseT_Half |
8088                           ADVERTISED_1000baseT_Full);
8089
8090         return err;
8091 }
8092
8093 static void __devinit tg3_read_partno(struct tg3 *tp)
8094 {
8095         unsigned char vpd_data[256];
8096         int i;
8097
8098         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8099                 /* Sun decided not to put the necessary bits in the
8100                  * NVRAM of their onboard tg3 parts :(
8101                  */
8102                 strcpy(tp->board_part_number, "Sun 570X");
8103                 return;
8104         }
8105
8106         for (i = 0; i < 256; i += 4) {
8107                 u32 tmp;
8108
8109                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8110                         goto out_not_found;
8111
8112                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
8113                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
8114                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8115                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8116         }
8117
8118         /* Now parse and find the part number. */
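             /* VPD is organized as PCI "large resource" tags: 0x82 is the
              * identifier string, 0x90 the read-only section and 0x91 the
              * read-write section.  Fields in the read-only section are a
              * two-character keyword plus a one-byte length, and the "PN"
              * keyword holds the board part number.
              */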
8119         for (i = 0; i < 256; ) {
8120                 unsigned char val = vpd_data[i];
8121                 int block_end;
8122
8123                 if (val == 0x82 || val == 0x91) {
8124                         i = (i + 3 +
8125                              (vpd_data[i + 1] +
8126                               (vpd_data[i + 2] << 8)));
8127                         continue;
8128                 }
8129
8130                 if (val != 0x90)
8131                         goto out_not_found;
8132
8133                 block_end = (i + 3 +
8134                              (vpd_data[i + 1] +
8135                               (vpd_data[i + 2] << 8)));
8136                 i += 3;
8137                 while (i < block_end) {
8138                         if (vpd_data[i + 0] == 'P' &&
8139                             vpd_data[i + 1] == 'N') {
8140                                 int partno_len = vpd_data[i + 2];
8141
8142                                 if (partno_len > 24)
8143                                         goto out_not_found;
8144
8145                                 memcpy(tp->board_part_number,
8146                                        &vpd_data[i + 3],
8147                                        partno_len);
8148
8149                                 /* Success. */
8150                                 return;
8151                         }
                             i += 3 + vpd_data[i + 2];
8152                 }
8153
8154                 /* Part number not found. */
8155                 goto out_not_found;
8156         }
8157
8158 out_not_found:
8159         strcpy(tp->board_part_number, "none");
8160 }
8161
8162 #ifdef CONFIG_SPARC64
8163 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8164 {
8165         struct pci_dev *pdev = tp->pdev;
8166         struct pcidev_cookie *pcp = pdev->sysdata;
8167
8168         if (pcp != NULL) {
8169                 int node = pcp->prom_node;
8170                 u32 venid;
8171                 int err;
8172
8173                 err = prom_getproperty(node, "subsystem-vendor-id",
8174                                        (char *) &venid, sizeof(venid));
8175                 if (err == 0 || err == -1)
8176                         return 0;
8177                 if (venid == PCI_VENDOR_ID_SUN)
8178                         return 1;
8179         }
8180         return 0;
8181 }
8182 #endif
8183
8184 static int __devinit tg3_get_invariants(struct tg3 *tp)
8185 {
8186         static struct pci_device_id write_reorder_chipsets[] = {
8187                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8188                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8189                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8190                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8191                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8192                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8193                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8194                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8195                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8196                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8197                 { },
8198         };
8199         u32 misc_ctrl_reg;
8200         u32 cacheline_sz_reg;
8201         u32 pci_state_reg, grc_misc_cfg;
8202         u32 val;
8203         u16 pci_cmd;
8204         int err;
8205
8206 #ifdef CONFIG_SPARC64
8207         if (tg3_is_sun_570X(tp))
8208                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8209 #endif
8210
8211         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8212          * reordering to the mailbox registers done by the host
8213          * controller can cause major troubles.  We read back from
8214          * every mailbox register write to force the writes to be
8215          * posted to the chip in order.
8216          */
8217         if (pci_dev_present(write_reorder_chipsets))
8218                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8219
8220         /* Force memory write invalidate off.  If we leave it on,
8221          * then on 5700_BX chips we have to enable a workaround.
8222          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8223          * to match the cacheline size.  The Broadcom driver has this
8224          * workaround but turns MWI off all the time so it never uses
8225          * it.  This seems to suggest that the workaround is insufficient.
8226          */
8227         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8228         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8229         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8230
8231         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8232          * has the register indirect write enable bit set before
8233          * we try to access any of the MMIO registers.  It is also
8234          * critical that the PCI-X hw workaround situation is decided
8235          * before that as well.
8236          */
8237         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8238                               &misc_ctrl_reg);
8239
8240         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8241                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8242
8243         /* Wrong chip ID in 5752 A0. This code can be removed later
8244          * as A0 is not in production.
8245          */
8246         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8247                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8248
8249         /* Initialize misc host control in PCI block. */
8250         tp->misc_host_ctrl |= (misc_ctrl_reg &
8251                                MISC_HOST_CTRL_CHIPREV);
8252         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8253                                tp->misc_host_ctrl);
8254
8255         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8256                               &cacheline_sz_reg);
8257
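             /* PCI config offset 0x0c packs four one-byte fields: cache
              * line size, latency timer, header type and BIST.
              */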
8258         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8259         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8260         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8261         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8262
8263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8265                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8266
8267         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8268             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8269                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8270
8271         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8272                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8273
8274         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
8275                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
8276
8277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8278             tp->pci_lat_timer < 64) {
8279                 tp->pci_lat_timer = 64;
8280
8281                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
8282                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
8283                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
8284                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
8285
8286                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8287                                        cacheline_sz_reg);
8288         }
8289
8290         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8291                               &pci_state_reg);
8292
8293         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
8294                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
8295
8296                 /* If this is a 5700 BX chipset, and we are in PCI-X
8297                  * mode, enable register write workaround.
8298                  *
8299                  * The workaround is to use indirect register accesses
8300                  * for all chip writes not to mailbox registers.
8301                  */
8302                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
8303                         u32 pm_reg;
8304                         u16 pci_cmd;
8305
8306                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8307
8308                         /* The chip can have its power management PCI config
8309                          * space registers clobbered due to this bug.
8310                          * So explicitly force the chip into D0 here.
8311                          */
8312                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8313                                               &pm_reg);
8314                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
8315                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
8316                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
8317                                                pm_reg);
8318
8319                         /* Also, force SERR#/PERR# in PCI command. */
8320                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8321                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
8322                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8323                 }
8324         }
8325
8326         /* Back to back register writes can cause problems on this chip,
8327          * the workaround is to read back all reg writes except those to
8328          * mailbox regs.  See tg3_write_indirect_reg32().
8329          *
8330          * PCI Express 5750_A0 rev chips need this workaround too.
8331          */
8332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8333             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8334              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8335                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8336
8337         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8338                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8339         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8340                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8341
8342         /* Chip-specific fixup from Broadcom driver */
8343         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8344             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8345                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8346                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8347         }
8348
8349         /* Get eeprom hw config before calling tg3_set_power_state().
8350          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
8351          * determined before calling tg3_set_power_state() so that
8352          * we know whether or not to switch out of Vaux power.
8353          * When the flag is set, it means that GPIO1 is used for eeprom
8354          * write protect and also implies that it is a LOM where GPIOs
8355          * are not used to switch power.
8356          */ 
8357         tg3_get_eeprom_hw_cfg(tp);
8358
8359         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
8360          * GPIO1 driven high will bring 5700's external PHY out of reset.
8361          * It is also used as eeprom write protect on LOMs.
8362          */
8363         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
8364         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8365             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
8366                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8367                                        GRC_LCLCTRL_GPIO_OUTPUT1);
8368         /* Unused GPIO3 must be driven as output on 5752 because there
8369          * are no pull-up resistors on unused GPIO pins.
8370          */
8371         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8372                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
8373
8374         /* Force the chip into D0. */
8375         err = tg3_set_power_state(tp, 0);
8376         if (err) {
8377                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8378                        pci_name(tp->pdev));
8379                 return err;
8380         }
8381
8382         /* 5700 B0 chips do not support checksumming correctly due
8383          * to hardware bugs.
8384          */
8385         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8386                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8387
8388         /* Pseudo-header checksum is done by hardware logic and not
8389          * the offload processors, so make the chip do the pseudo-
8390          * header checksums on receive.  For transmit it is more
8391          * convenient to do the pseudo-header checksum in software
8392          * as Linux does that on transmit for us in all cases.
8393          */
8394         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8395         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8396
8397         /* Derive initial jumbo mode from MTU assigned in
8398          * ether_setup() via the alloc_etherdev() call
8399          */
8400         if (tp->dev->mtu > ETH_DATA_LEN)
8401                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8402
8403         /* Determine WakeOnLan speed to use. */
8404         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8405             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8406             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8407             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8408                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8409         } else {
8410                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8411         }
8412
8413         /* A few boards don't want Ethernet@WireSpeed phy feature */
8414         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8415             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8416              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8417              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8418                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8419
8420         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8421             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8422                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8423         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8424                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8425
8426         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8427                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8428
8429         /* Only 5701 and later support tagged irq status mode.
8430          * Also, 5788 chips cannot use tagged irq status.
8431          *
8432          * However, since we are using NAPI, we avoid tagged irq status
8433          * because the interrupt condition is more difficult to
8434          * fully clear in that mode.
8435          */
8436         tp->coalesce_mode = 0;
8437
8438         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8439             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8440                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8441
8442         /* Initialize MAC MI mode, polling disabled. */
8443         tw32_f(MAC_MI_MODE, tp->mi_mode);
8444         udelay(80);
8445
8446         /* Initialize data/descriptor byte/word swapping. */
8447         val = tr32(GRC_MODE);
8448         val &= GRC_MODE_HOST_STACKUP;
8449         tw32(GRC_MODE, val | tp->grc_mode);
8450
8451         tg3_switch_clocks(tp);
8452
8453         /* Clear this out for sanity. */
8454         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8455
8456         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8457                               &pci_state_reg);
8458         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8459             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8460                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8461
8462                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8463                     chiprevid == CHIPREV_ID_5701_B0 ||
8464                     chiprevid == CHIPREV_ID_5701_B2 ||
8465                     chiprevid == CHIPREV_ID_5701_B5) {
8466                         void __iomem *sram_base;
8467
8468                         /* Write some dummy words into the SRAM status block
8469                          * area and see if it reads back correctly.  If the return
8470                          * value is bad, force-enable the PCIX workaround.
8471                          */
8472                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8473
8474                         writel(0x00000000, sram_base);
8475                         writel(0x00000000, sram_base + 4);
8476                         writel(0xffffffff, sram_base + 4);
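                             /* Offset 0 should still read back as zero; if
                              * the 0xffffffff written to offset 4 shows up
                              * here, the chip mishandled the target write
                              * and needs the PCIX workaround.
                              */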
8477                         if (readl(sram_base) != 0x00000000)
8478                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8479                 }
8480         }
8481
8482         udelay(50);
8483         tg3_nvram_init(tp);
8484
8485         grc_misc_cfg = tr32(GRC_MISC_CFG);
8486         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8487
8488         /* Broadcom's driver says that CIOBE multisplit has a bug */
8489 #if 0
8490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8491             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8492                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8493                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8494         }
8495 #endif
8496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8497             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8498              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8499                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8500
8501         /* these are limited to 10/100 only */
8502         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8503              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8504             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8505              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8506              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8507               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8508               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8509             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8510              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8511               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8512                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8513
8514         err = tg3_phy_probe(tp);
8515         if (err) {
8516                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8517                        pci_name(tp->pdev), err);
8518                 /* ... but do not return immediately ... */
8519         }
8520
8521         tg3_read_partno(tp);
8522
8523         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8524                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8525         } else {
8526                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8527                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8528                 else
8529                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8530         }
8531
8532         /* 5700 {AX,BX} chips have a broken status block link
8533          * change bit implementation, so we must use the
8534          * status register in those cases.
8535          */
8536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8537                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8538         else
8539                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8540
8541         /* The led_ctrl is set during tg3_phy_probe; here we might
8542          * have to force the link status polling mechanism based
8543          * upon subsystem IDs.
8544          */
8545         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8546             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8547                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8548                                   TG3_FLAG_USE_LINKCHG_REG);
8549         }
8550
8551         /* For all SERDES we poll the MAC status register. */
8552         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8553                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8554         else
8555                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8556
8557         /* 5700 BX chips need to have their TX producer index mailboxes
8558          * written twice to workaround a bug.
8559          */
8560         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8561                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8562         else
8563                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8564
8565         /* It seems all chips can get confused if TX buffers
8566          * straddle the 4GB address boundary in some cases.
8567          */
8568         tp->dev->hard_start_xmit = tg3_start_xmit;
8569
8570         tp->rx_offset = 2;
8571         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8572             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8573                 tp->rx_offset = 0;
8574
8575         /* By default, disable wake-on-lan.  User can change this
8576          * using ETHTOOL_SWOL.
8577          */
8578         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8579
8580         return err;
8581 }
8582
8583 #ifdef CONFIG_SPARC64
8584 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8585 {
8586         struct net_device *dev = tp->dev;
8587         struct pci_dev *pdev = tp->pdev;
8588         struct pcidev_cookie *pcp = pdev->sysdata;
8589
8590         if (pcp != NULL) {
8591                 int node = pcp->prom_node;
8592
8593                 if (prom_getproplen(node, "local-mac-address") == 6) {
8594                         prom_getproperty(node, "local-mac-address",
8595                                          dev->dev_addr, 6);
8596                         return 0;
8597                 }
8598         }
8599         return -ENODEV;
8600 }
8601
8602 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8603 {
8604         struct net_device *dev = tp->dev;
8605
8606         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8607         return 0;
8608 }
8609 #endif
8610
8611 static int __devinit tg3_get_device_address(struct tg3 *tp)
8612 {
8613         struct net_device *dev = tp->dev;
8614         u32 hi, lo, mac_offset;
8615
8616 #ifdef CONFIG_SPARC64
8617         if (!tg3_get_macaddr_sparc(tp))
8618                 return 0;
8619 #endif
8620
8621         mac_offset = 0x7c;
8622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8623             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8624                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8625                         mac_offset = 0xcc;
8626                 if (tg3_nvram_lock(tp))
8627                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8628                 else
8629                         tg3_nvram_unlock(tp);
8630         }
8631
8632         /* First try to get it from MAC address mailbox. */
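             /* 0x484b is ASCII "HK", apparently the marker used to flag a
              * valid MAC address in the mailbox.
              */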
8633         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8634         if ((hi >> 16) == 0x484b) {
8635                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8636                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8637
8638                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8639                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8640                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8641                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8642                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8643         }
8644         /* Next, try NVRAM. */
8645         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8646                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8647                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8648                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8649                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8650                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8651                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8652                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8653                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8654         }
8655         /* Finally just fetch it out of the MAC control regs. */
8656         else {
8657                 hi = tr32(MAC_ADDR_0_HIGH);
8658                 lo = tr32(MAC_ADDR_0_LOW);
8659
8660                 dev->dev_addr[5] = lo & 0xff;
8661                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8662                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8663                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8664                 dev->dev_addr[1] = hi & 0xff;
8665                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8666         }
8667
8668         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8669 #ifdef CONFIG_SPARC64
8670                 if (!tg3_get_default_macaddr_sparc(tp))
8671                         return 0;
8672 #endif
8673                 return -EINVAL;
8674         }
8675         return 0;
8676 }
8677
8678 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8679 {
8680         struct tg3_internal_buffer_desc test_desc;
8681         u32 sram_dma_descs;
8682         int i, ret;
8683
8684         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8685
8686         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8687         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8688         tw32(RDMAC_STATUS, 0);
8689         tw32(WDMAC_STATUS, 0);
8690
8691         tw32(BUFMGR_MODE, 0);
8692         tw32(FTQ_RESET, 0);
8693
8694         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8695         test_desc.addr_lo = buf_dma & 0xffffffff;
8696         test_desc.nic_mbuf = 0x00002100;
8697         test_desc.len = size;
8698
8699         /*
8700          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8701          * the *second* time the tg3 driver was getting loaded after an
8702          * initial scan.
8703          *
8704          * Broadcom tells me:
8705          *   ...the DMA engine is connected to the GRC block and a DMA
8706          *   reset may affect the GRC block in some unpredictable way...
8707          *   The behavior of resets to individual blocks has not been tested.
8708          *
8709          * Broadcom noted the GRC reset will also reset all sub-components.
8710          */
8711         if (to_device) {
8712                 test_desc.cqid_sqid = (13 << 8) | 2;
8713
8714                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8715                 udelay(40);
8716         } else {
8717                 test_desc.cqid_sqid = (16 << 8) | 7;
8718
8719                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8720                 udelay(40);
8721         }
8722         test_desc.flags = 0x00000005;
8723
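             /* Copy the test descriptor into NIC SRAM one 32-bit word at a
              * time through the PCI config-space memory window registers.
              */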
8724         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8725                 u32 val;
8726
8727                 val = *(((u32 *)&test_desc) + i);
8728                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8729                                        sram_dma_descs + (i * sizeof(u32)));
8730                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8731         }
8732         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8733
8734         if (to_device) {
8735                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8736         } else {
8737                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8738         }
8739
8740         ret = -ENODEV;
8741         for (i = 0; i < 40; i++) {
8742                 u32 val;
8743
8744                 if (to_device)
8745                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8746                 else
8747                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8748                 if ((val & 0xffff) == sram_dma_descs) {
8749                         ret = 0;
8750                         break;
8751                 }
8752
8753                 udelay(100);
8754         }
8755
8756         return ret;
8757 }
8758
8759 #define TEST_BUFFER_SIZE        0x400
8760
8761 static int __devinit tg3_test_dma(struct tg3 *tp)
8762 {
8763         dma_addr_t buf_dma;
8764         u32 *buf;
8765         int ret;
8766
8767         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8768         if (!buf) {
8769                 ret = -ENOMEM;
8770                 goto out_nofree;
8771         }
8772
8773         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8774                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8775
8776 #ifndef CONFIG_X86
8777         {
8778                 u8 byte;
8779                 int cacheline_size;
8780                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8781
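                     /* PCI_CACHE_LINE_SIZE is in units of 32-bit words,
                      * hence the multiply by 4; zero presumably means the
                      * firmware never programmed it.
                      */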
8782                 if (byte == 0)
8783                         cacheline_size = 1024;
8784                 else
8785                         cacheline_size = (int) byte * 4;
8786
8787                 switch (cacheline_size) {
8788                 case 16:
8789                 case 32:
8790                 case 64:
8791                 case 128:
8792                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8793                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8794                                 tp->dma_rwctrl |=
8795                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8796                                 break;
8797                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8798                                 tp->dma_rwctrl &=
8799                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8800                                 tp->dma_rwctrl |=
8801                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8802                                 break;
8803                         }
8804                         /* fallthrough */
8805                 case 256:
8806                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8807                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8808                                 tp->dma_rwctrl |=
8809                                         DMA_RWCTRL_WRITE_BNDRY_256;
8810                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8811                                 tp->dma_rwctrl |=
8812                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8813                 }
8814         }
8815 #endif
8816
8817         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8818                 /* DMA read watermark not used on PCIE */
8819                 tp->dma_rwctrl |= 0x00180000;
8820         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8821                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8822                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8823                         tp->dma_rwctrl |= 0x003f0000;
8824                 else
8825                         tp->dma_rwctrl |= 0x003f000f;
8826         } else {
8827                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8828                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8829                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8830
8831                         if (ccval == 0x6 || ccval == 0x7)
8832                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8833
8834                         /* Set bit 23 to re-enable the PCIX hw bug fix */
8835                         tp->dma_rwctrl |= 0x009f0000;
8836                 } else {
8837                         tp->dma_rwctrl |= 0x001b000f;
8838                 }
8839         }
8840
8841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8843                 tp->dma_rwctrl &= 0xfffffff0;
8844
8845         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8846             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8847                 /* Remove this if it causes problems for some boards. */
8848                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8849
8850                 /* On 5700/5701 chips, we need to set this bit.
8851                  * Otherwise the chip will issue cacheline transactions
8852                  * to streamable DMA memory without all of the byte
8853                  * enables turned on.  This is an error on several
8854                  * RISC PCI controllers, in particular sparc64.
8855                  *
8856                  * On 5703/5704 chips, this bit has been reassigned
8857                  * a different meaning.  In particular, it is used
8858                  * on those chips to enable a PCI-X workaround.
8859                  */
8860                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8861         }
8862
8863         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8864
8865 #if 0
8866         /* Unneeded, already done by tg3_get_invariants.  */
8867         tg3_switch_clocks(tp);
8868 #endif
8869
8870         ret = 0;
8871         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8872             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8873                 goto out;
8874
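             /* Fill the buffer with a known pattern, DMA it to the chip and
              * back, then verify it.  If corruption is seen while the write
              * boundary is disabled, fall back to a 16-byte boundary and
              * retry; corruption in any other configuration is fatal.
              */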
8875         while (1) {
8876                 u32 *p = buf, i;
8877
8878                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8879                         p[i] = i;
8880
8881                 /* Send the buffer to the chip. */
8882                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8883                 if (ret) {
8884                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
8885                         break;
8886                 }
8887
8888 #if 0
8889                 /* validate data reached card RAM correctly. */
8890                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8891                         u32 val;
8892                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8893                         if (le32_to_cpu(val) != p[i]) {
8894                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8895                                 /* ret = -ENODEV here? */
8896                         }
8897                         p[i] = 0;
8898                 }
8899 #endif
8900                 /* Now read it back. */
8901                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8902                 if (ret) {
8903                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
8904
8905                         break;
8906                 }
8907
8908                 /* Verify it. */
8909                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8910                         if (p[i] == i)
8911                                 continue;
8912
8913                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8914                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8915                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8916                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8917                                 break;
8918                         } else {
8919                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8920                                 ret = -ENODEV;
8921                                 goto out;
8922                         }
8923                 }
8924
8925                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8926                         /* Success. */
8927                         ret = 0;
8928                         break;
8929                 }
8930         }
8931
8932 out:
8933         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8934 out_nofree:
8935         return ret;
8936 }
8937
8938 static void __devinit tg3_init_link_config(struct tg3 *tp)
8939 {
8940         tp->link_config.advertising =
8941                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8942                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8943                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8944                  ADVERTISED_Autoneg | ADVERTISED_MII);
8945         tp->link_config.speed = SPEED_INVALID;
8946         tp->link_config.duplex = DUPLEX_INVALID;
8947         tp->link_config.autoneg = AUTONEG_ENABLE;
8948         netif_carrier_off(tp->dev);
8949         tp->link_config.active_speed = SPEED_INVALID;
8950         tp->link_config.active_duplex = DUPLEX_INVALID;
8951         tp->link_config.phy_is_low_power = 0;
8952         tp->link_config.orig_speed = SPEED_INVALID;
8953         tp->link_config.orig_duplex = DUPLEX_INVALID;
8954         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8955 }
8956
8957 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8958 {
8959         tp->bufmgr_config.mbuf_read_dma_low_water =
8960                 DEFAULT_MB_RDMA_LOW_WATER;
8961         tp->bufmgr_config.mbuf_mac_rx_low_water =
8962                 DEFAULT_MB_MACRX_LOW_WATER;
8963         tp->bufmgr_config.mbuf_high_water =
8964                 DEFAULT_MB_HIGH_WATER;
8965
8966         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8967                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8968         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8969                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8970         tp->bufmgr_config.mbuf_high_water_jumbo =
8971                 DEFAULT_MB_HIGH_WATER_JUMBO;
8972
8973         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8974         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8975 }
8976
8977 static char * __devinit tg3_phy_string(struct tg3 *tp)
8978 {
8979         switch (tp->phy_id & PHY_ID_MASK) {
8980         case PHY_ID_BCM5400:    return "5400";
8981         case PHY_ID_BCM5401:    return "5401";
8982         case PHY_ID_BCM5411:    return "5411";
8983         case PHY_ID_BCM5701:    return "5701";
8984         case PHY_ID_BCM5703:    return "5703";
8985         case PHY_ID_BCM5704:    return "5704";
8986         case PHY_ID_BCM5705:    return "5705";
8987         case PHY_ID_BCM5750:    return "5750";
8988         case PHY_ID_BCM5752:    return "5752";
8989         case PHY_ID_BCM8002:    return "8002/serdes";
8990         case 0:                 return "serdes";
8991         default:                return "unknown";
8992         }
8993 }
8994
8995 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8996 {
8997         struct pci_dev *peer;
8998         unsigned int func, devnr = tp->pdev->devfn & ~7;
8999
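             /* The 5704 is a dual-function device: the peer is the other
              * PCI function in the same slot (same device number, different
              * function number).
              */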
9000         for (func = 0; func < 8; func++) {
9001                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
9002                 if (peer && peer != tp->pdev)
9003                         break;
9004                 pci_dev_put(peer);
9005         }
9006         if (!peer || peer == tp->pdev)
9007                 BUG();
9008
9009         /*
9010          * We don't need to keep the refcount elevated; there's no way
9011          * to remove one half of this device without removing the other
9012          */
9013         pci_dev_put(peer);
9014
9015         return peer;
9016 }
9017
9018 static int __devinit tg3_init_one(struct pci_dev *pdev,
9019                                   const struct pci_device_id *ent)
9020 {
9021         static int tg3_version_printed = 0;
9022         unsigned long tg3reg_base, tg3reg_len;
9023         struct net_device *dev;
9024         struct tg3 *tp;
9025         int i, err, pci_using_dac, pm_cap;
9026
9027         if (tg3_version_printed++ == 0)
9028                 printk(KERN_INFO "%s", version);
9029
9030         err = pci_enable_device(pdev);
9031         if (err) {
9032                 printk(KERN_ERR PFX "Cannot enable PCI device, "
9033                        "aborting.\n");
9034                 return err;
9035         }
9036
9037         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9038                 printk(KERN_ERR PFX "Cannot find proper PCI device "
9039                        "base address, aborting.\n");
9040                 err = -ENODEV;
9041                 goto err_out_disable_pdev;
9042         }
9043
9044         err = pci_request_regions(pdev, DRV_MODULE_NAME);
9045         if (err) {
9046                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9047                        "aborting.\n");
9048                 goto err_out_disable_pdev;
9049         }
9050
9051         pci_set_master(pdev);
9052
9053         /* Find power-management capability. */
9054         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9055         if (pm_cap == 0) {
9056                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9057                        "aborting.\n");
9058                 err = -EIO;
9059                 goto err_out_free_res;
9060         }
9061
9062         /* Configure DMA attributes. */
9063         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9064         if (!err) {
9065                 pci_using_dac = 1;
9066                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9067                 if (err < 0) {
9068                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9069                                "for consistent allocations\n");
9070                         goto err_out_free_res;
9071                 }
9072         } else {
9073                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9074                 if (err) {
9075                         printk(KERN_ERR PFX "No usable DMA configuration, "
9076                                "aborting.\n");
9077                         goto err_out_free_res;
9078                 }
9079                 pci_using_dac = 0;
9080         }
9081
9082         tg3reg_base = pci_resource_start(pdev, 0);
9083         tg3reg_len = pci_resource_len(pdev, 0);
9084
9085         dev = alloc_etherdev(sizeof(*tp));
9086         if (!dev) {
9087                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9088                 err = -ENOMEM;
9089                 goto err_out_free_res;
9090         }
9091
9092         SET_MODULE_OWNER(dev);
9093         SET_NETDEV_DEV(dev, &pdev->dev);
9094
9095         if (pci_using_dac)
9096                 dev->features |= NETIF_F_HIGHDMA;
9097         dev->features |= NETIF_F_LLTX;
9098 #if TG3_VLAN_TAG_USED
9099         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9100         dev->vlan_rx_register = tg3_vlan_rx_register;
9101         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
9102 #endif
9103
9104         tp = netdev_priv(dev);
9105         tp->pdev = pdev;
9106         tp->dev = dev;
9107         tp->pm_cap = pm_cap;
9108         tp->mac_mode = TG3_DEF_MAC_MODE;
9109         tp->rx_mode = TG3_DEF_RX_MODE;
9110         tp->tx_mode = TG3_DEF_TX_MODE;
9111         tp->mi_mode = MAC_MI_MODE_BASE;
9112         if (tg3_debug > 0)
9113                 tp->msg_enable = tg3_debug;
9114         else
9115                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
9116
9117         /* The word/byte swap controls here control register access byte
9118          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
9119          * setting below.
9120          */
9121         tp->misc_host_ctrl =
9122                 MISC_HOST_CTRL_MASK_PCI_INT |
9123                 MISC_HOST_CTRL_WORD_SWAP |
9124                 MISC_HOST_CTRL_INDIR_ACCESS |
9125                 MISC_HOST_CTRL_PCISTATE_RW;
9126
9127         /* The NONFRM (non-frame) byte/word swap controls take effect
9128          * on descriptor entries, anything which isn't packet data.
9129          *
9130          * The StrongARM chips on the board (one for tx, one for rx)
9131          * are running in big-endian mode.
9132          */
9133         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
9134                         GRC_MODE_WSWAP_NONFRM_DATA);
9135 #ifdef __BIG_ENDIAN
9136         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
9137 #endif
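        /* Taken together: misc_host_ctrl swaps register accesses, grc_mode
         * swaps DMA'd packet data unconditionally, and on big-endian hosts
         * the non-frame (descriptor) data is byte swapped as well.
         */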
9138         spin_lock_init(&tp->lock);
9139         spin_lock_init(&tp->tx_lock);
9140         spin_lock_init(&tp->indirect_lock);
9141         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
9142
9143         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
9144         if (!tp->regs) {
9145                 printk(KERN_ERR PFX "Cannot map device registers, "
9146                        "aborting.\n");
9147                 err = -ENOMEM;
9148                 goto err_out_free_dev;
9149         }
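        /* tp->regs now holds the uncached mapping of BAR 0 (tg3reg_base /
         * tg3reg_len obtained above); register accessors such as tr32() and
         * tw32() used further down operate on this mapping.
         */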
9150
9151         tg3_init_link_config(tp);
9152
9153         tg3_init_bufmgr_config(tp);
9154
9155         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
9156         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
9157         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
9158
9159         dev->open = tg3_open;
9160         dev->stop = tg3_close;
9161         dev->get_stats = tg3_get_stats;
9162         dev->set_multicast_list = tg3_set_rx_mode;
9163         dev->set_mac_address = tg3_set_mac_addr;
9164         dev->do_ioctl = tg3_ioctl;
9165         dev->tx_timeout = tg3_tx_timeout;
9166         dev->poll = tg3_poll;
9167         dev->ethtool_ops = &tg3_ethtool_ops;
9168         dev->weight = 64;
9169         dev->watchdog_timeo = TG3_TX_TIMEOUT;
9170         dev->change_mtu = tg3_change_mtu;
9171         dev->irq = pdev->irq;
9172 #ifdef CONFIG_NET_POLL_CONTROLLER
9173         dev->poll_controller = tg3_poll_controller;
9174 #endif
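        /* Wire up the net_device entry points.  Receive processing uses the
         * NAPI ->poll hook with a budget (weight) of 64 packets per poll,
         * and ->tx_timeout is invoked by the stack if transmits stall for
         * longer than TG3_TX_TIMEOUT.
         */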
9175
9176         err = tg3_get_invariants(tp);
9177         if (err) {
9178                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
9179                        "aborting.\n");
9180                 goto err_out_iounmap;
9181         }
9182
9183         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9184                 tp->bufmgr_config.mbuf_read_dma_low_water =
9185                         DEFAULT_MB_RDMA_LOW_WATER_5705;
9186                 tp->bufmgr_config.mbuf_mac_rx_low_water =
9187                         DEFAULT_MB_MACRX_LOW_WATER_5705;
9188                 tp->bufmgr_config.mbuf_high_water =
9189                         DEFAULT_MB_HIGH_WATER_5705;
9190         }
9191
9192 #if TG3_TSO_SUPPORT != 0
9193         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
9194                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9195         }
9196         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9197             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9198             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
9199             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
9200                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
9201         } else {
9202                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
9203         }
9204
9205         /* TSO is off by default; the user can enable it using ethtool. */
9206 #if 0
9207         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
9208                 dev->features |= NETIF_F_TSO;
9209 #endif
9210
9211 #endif
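        /* At this point TSO capability is only recorded in tg3_flags2;
         * NETIF_F_TSO itself is deliberately left off (see the #if 0 block
         * above) so the user has to enable it explicitly through ethtool.
         */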
9212
9213         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
9214             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
9215             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
9216                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
9217                 tp->rx_pending = 63;
9218         }
9219
9220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9221                 tp->pdev_peer = tg3_find_5704_peer(tp);
9222
9223         err = tg3_get_device_address(tp);
9224         if (err) {
9225                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
9226                        "aborting.\n");
9227                 goto err_out_iounmap;
9228         }
9229
9230         /*
9231          * Reset the chip in case the UNDI or EFI boot driver did not shut
9232          * it down; otherwise the DMA self test will enable WDMAC and we
9233          * will see (spurious) pending DMA on the PCI bus at that point.
9234          */
9235         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
9236             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9237                 pci_save_state(tp->pdev);
9238                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
9239                 tg3_halt(tp);
9240         }
9241
9242         err = tg3_test_dma(tp);
9243         if (err) {
9244                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
9245                 goto err_out_iounmap;
9246         }
9247
9248         /* Tigon3 can offload IPv4 checksums only... and some chips have
9249          * buggy checksumming.
9250          */
9251         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
9252                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
9253                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9254         } else
9255                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
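        /* Checksum offload is advertised only when the chip's checksumming is
         * not known to be broken: NETIF_F_IP_CSUM covers the transmit side
         * (IPv4 only, per the comment above) and TG3_FLAG_RX_CHECKSUMS tracks
         * the receive side.
         */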
9256
9257         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
9258                 dev->features &= ~NETIF_F_HIGHDMA;
9259
9260         /* flow control autonegotiation is default behavior */
9261         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9262
9263         err = register_netdev(dev);
9264         if (err) {
9265                 printk(KERN_ERR PFX "Cannot register net device, "
9266                        "aborting.\n");
9267                 goto err_out_iounmap;
9268         }
9269
9270         pci_set_drvdata(pdev, dev);
9271
9272         /* Now that we have fully setup the chip, save away a snapshot
9273          * of the PCI config space.  We need to restore this after
9274          * GRC_MISC_CFG core clock resets and some resume events.
9275          */
9276         pci_save_state(tp->pdev);
9277
9278         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
9279                dev->name,
9280                tp->board_part_number,
9281                tp->pci_chip_rev_id,
9282                tg3_phy_string(tp),
9283                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
9284                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
9285                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
9286                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
9287                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
9288                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
9289
9290         for (i = 0; i < 6; i++)
9291                 printk("%2.2x%c", dev->dev_addr[i],
9292                        i == 5 ? '\n' : ':');
9293
9294         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
9295                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
9296                "TSOcap[%d]\n",
9297                dev->name,
9298                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
9299                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
9300                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
9301                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
9302                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9303                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9304                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
9305
9306         return 0;
9307
9308 err_out_iounmap:
9309         iounmap(tp->regs);
9310
9311 err_out_free_dev:
9312         free_netdev(dev);
9313
9314 err_out_free_res:
9315         pci_release_regions(pdev);
9316
9317 err_out_disable_pdev:
9318         pci_disable_device(pdev);
9319         pci_set_drvdata(pdev, NULL);
9320         return err;
9321 }
9322
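/* tg3_remove_one() undoes tg3_init_one() in reverse order: unregister the
 * net_device, unmap the registers, free the netdev, then release the PCI
 * regions and disable the device.
 */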
9323 static void __devexit tg3_remove_one(struct pci_dev *pdev)
9324 {
9325         struct net_device *dev = pci_get_drvdata(pdev);
9326
9327         if (dev) {
9328                 struct tg3 *tp = netdev_priv(dev);
9329
9330                 unregister_netdev(dev);
9331                 iounmap(tp->regs);
9332                 free_netdev(dev);
9333                 pci_release_regions(pdev);
9334                 pci_disable_device(pdev);
9335                 pci_set_drvdata(pdev, NULL);
9336         }
9337 }
9338
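/* PM suspend: stop the data path and driver timer, mask interrupts, detach
 * the device, halt the chip, and then enter the requested PCI power state.
 * If the power-state change fails, the hardware is re-initialized and the
 * interface is re-attached so the device keeps working.
 */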
9339 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9340 {
9341         struct net_device *dev = pci_get_drvdata(pdev);
9342         struct tg3 *tp = netdev_priv(dev);
9343         int err;
9344
9345         if (!netif_running(dev))
9346                 return 0;
9347
9348         tg3_netif_stop(tp);
9349
9350         del_timer_sync(&tp->timer);
9351
9352         spin_lock_irq(&tp->lock);
9353         spin_lock(&tp->tx_lock);
9354         tg3_disable_ints(tp);
9355         spin_unlock(&tp->tx_lock);
9356         spin_unlock_irq(&tp->lock);
9357
9358         netif_device_detach(dev);
9359
9360         spin_lock_irq(&tp->lock);
9361         spin_lock(&tp->tx_lock);
9362         tg3_halt(tp);
9363         spin_unlock(&tp->tx_lock);
9364         spin_unlock_irq(&tp->lock);
9365
9366         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9367         if (err) {
9368                 spin_lock_irq(&tp->lock);
9369                 spin_lock(&tp->tx_lock);
9370
9371                 tg3_init_hw(tp);
9372
9373                 tp->timer.expires = jiffies + tp->timer_offset;
9374                 add_timer(&tp->timer);
9375
9376                 netif_device_attach(dev);
9377                 tg3_netif_start(tp);
9378
9379                 spin_unlock(&tp->tx_lock);
9380                 spin_unlock_irq(&tp->lock);
9381         }
9382
9383         return err;
9384 }
9385
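/* PM resume: restore the saved PCI config space, return to full power,
 * re-initialize the hardware, restart the driver timer and interrupts, and
 * let traffic flow again.
 */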
9386 static int tg3_resume(struct pci_dev *pdev)
9387 {
9388         struct net_device *dev = pci_get_drvdata(pdev);
9389         struct tg3 *tp = netdev_priv(dev);
9390         int err;
9391
9392         if (!netif_running(dev))
9393                 return 0;
9394
9395         pci_restore_state(tp->pdev);
9396
9397         err = tg3_set_power_state(tp, 0);
9398         if (err)
9399                 return err;
9400
9401         netif_device_attach(dev);
9402
9403         spin_lock_irq(&tp->lock);
9404         spin_lock(&tp->tx_lock);
9405
9406         tg3_init_hw(tp);
9407
9408         tp->timer.expires = jiffies + tp->timer_offset;
9409         add_timer(&tp->timer);
9410
9411         tg3_enable_ints(tp);
9412
9413         tg3_netif_start(tp);
9414
9415         spin_unlock(&tp->tx_lock);
9416         spin_unlock_irq(&tp->lock);
9417
9418         return 0;
9419 }
9420
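/* Glue between this driver and the PCI core: devices are matched against
 * tg3_pci_tbl and then handed to tg3_init_one()/tg3_remove_one() and the
 * suspend/resume handlers above.
 */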
9421 static struct pci_driver tg3_driver = {
9422         .name           = DRV_MODULE_NAME,
9423         .id_table       = tg3_pci_tbl,
9424         .probe          = tg3_init_one,
9425         .remove         = __devexit_p(tg3_remove_one),
9426         .suspend        = tg3_suspend,
9427         .resume         = tg3_resume
9428 };
9429
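/* Module entry points: the driver is registered with the PCI core at module
 * load via pci_module_init() and unregistered again on unload.
 */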
9430 static int __init tg3_init(void)
9431 {
9432         return pci_module_init(&tg3_driver);
9433 }
9434
9435 static void __exit tg3_cleanup(void)
9436 {
9437         pci_unregister_driver(&tg3_driver);
9438 }
9439
9440 module_init(tg3_init);
9441 module_exit(tg3_cleanup);