]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/net/tg3.c
b1e5660baf4a78665cc300d1109072dc2d04c7da
[linux-2.6-omap-h63xx.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.82"
68 #define DRV_MODULE_RELDATE      "October 5, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
/* One-line module banner (name, version, release date) printed at probe. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI vendor/device IDs this driver claims; the PCI core matches probed
 * devices against this table.  The empty entry terminates the list.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};
212
213 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
214
/* Names reported for ETHTOOL_GSTATS/GSTRINGS.  NOTE(review): the array is
 * sized by TG3_NUM_STATS (derived from struct tg3_ethtool_stats), so the
 * order here presumably mirrors that struct's u64 layout exactly -- do not
 * reorder or insert entries without updating the struct; verify in tg3.h.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
295
/* Names reported for the TG3_NUM_TEST ethtool self-tests.  NOTE(review):
 * order presumably matches the result array filled by the self-test
 * routine -- confirm against the tg3 self-test implementation.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
306
307 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
308 {
309         writel(val, tp->regs + off);
310 }
311
312 static u32 tg3_read32(struct tg3 *tp, u32 off)
313 {
314         return (readl(tp->regs + off));
315 }
316
/* Register write via PCI config space (indirect mode): point the shared
 * register-base window at @off, then write @val through the data port.
 * indirect_lock serializes all users of the window pair.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
326
/* MMIO write followed by a read-back of the same register, forcing the
 * posted write out to the chip before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
332
/* Register read via PCI config space (indirect mode); counterpart of
 * tg3_write_indirect_reg32.  indirect_lock guards the shared window.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
344
/* Mailbox write via PCI config space (indirect mode).  Two mailboxes
 * (RX return-ring consumer, standard-ring producer) have dedicated
 * config-space shadow registers and bypass the shared window.  All other
 * mailboxes go through the window at off + 0x5600 -- presumably the
 * mailbox region base in register space (cf. GRCMBOX_BASE); verify in tg3.h.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
374
/* Mailbox read via PCI config space (indirect mode); same +0x5600 mailbox
 * region offset as tg3_write_indirect_mbox.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
386
387 /* usec_wait specifies the wait time in usec when writing to certain registers
388  * where it is unsafe to read back the register without some delay.
389  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
390  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
391  */
392 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
393 {
394         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
395             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
396                 /* Non-posted methods */
397                 tp->write32(tp, off, val);
398         else {
399                 /* Posted method */
400                 tg3_write32(tp, off, val);
401                 if (usec_wait)
402                         udelay(usec_wait);
403                 tp->read32(tp, off);
404         }
405         /* Wait again after the read for the posted method to guarantee that
406          * the wait time is met.
407          */
408         if (usec_wait)
409                 udelay(usec_wait);
410 }
411
/* Mailbox write with an optional read-back flush.  The flush is skipped
 * when the chip needs the mailbox write-reorder workaround or the ICH
 * workaround (where reading back is unsafe/unnecessary).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
419
/* TX mailbox write.  Chips with the TXD mailbox hardware bug require the
 * value to be written twice; chips that may reorder mailbox writes get a
 * read-back to flush the write before returning.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
429
430 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
431 {
432         return (readl(tp->regs + off + GRCMBOX_BASE));
433 }
434
/* 5906 mailbox write into the GRC mailbox region; no read-back flush. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
439
440 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
441 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
442 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
443 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
444 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
445
446 #define tw32(reg,val)           tp->write32(tp, reg, val)
447 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
448 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
449 #define tr32(reg)               tp->read32(tp, reg)
450
/* Write a 32-bit word into NIC-local SRAM at @off via the memory window.
 * On 5906 the statistics-block SRAM range is not writable and the write
 * is silently dropped.  indirect_lock serializes window users; the window
 * base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Window access through PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window access through MMIO, with read-back flushes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
475
/* Read a 32-bit word from NIC-local SRAM at @off via the memory window;
 * counterpart of tg3_write_mem.  On 5906 the statistics-block range is
 * inaccessible and *val is reported as zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Window access through PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window access through MMIO. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
502
/* Mask chip interrupts: set the PCI-INT mask bit in misc host control,
 * then write 1 to the interrupt mailbox to deassert the line.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
509
/* Force an interrupt if status work is already pending.  Without tagged
 * status and with the status block marked updated, a GRC SETINT raises
 * one directly; otherwise kick the coalescing engine ("now" mode) so it
 * re-evaluates and generates one if needed.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
519
/* Unmask chip interrupts and acknowledge up to the last seen status tag.
 * irq_sync is cleared first (wmb orders it before the unmask).  With
 * 1-shot MSI the mailbox is written a second time -- presumably required
 * to re-arm the one-shot interrupt; confirm against chip errata.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	/* Fire an interrupt now if work was already pending. */
	tg3_cond_int(tp);
}
534
535 static inline unsigned int tg3_has_work(struct tg3 *tp)
536 {
537         struct tg3_hw_status *sblk = tp->hw_status;
538         unsigned int work_exists = 0;
539
540         /* check for phy events */
541         if (!(tp->tg3_flags &
542               (TG3_FLAG_USE_LINKCHG_REG |
543                TG3_FLAG_POLL_SERDES))) {
544                 if (sblk->status & SD_STATUS_LINK_CHG)
545                         work_exists = 1;
546         }
547         /* check for RX/TX work to do */
548         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
549             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
550                 work_exists = 1;
551
552         return work_exists;
553 }
554
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Ack up to last_tag and re-enable; mmiowb() orders the mailbox
	 * write before any subsequent MMIO from other CPUs.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
575
/* Quiesce the data path: stop NAPI polling and disable the TX queue.
 * trans_start is refreshed first so the watchdog does not declare a
 * TX timeout while the queue is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
582
/* Restart the data path: wake the TX queue, re-enable NAPI, and re-enable
 * chip interrupts.  SD_STATUS_UPDATED is set so tg3_enable_ints ->
 * tg3_cond_int fires an interrupt if status work already accumulated.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
594
/* Step the chip core clock down via TG3PCI_CLOCK_CTRL.  Skipped entirely
 * on CPMU-equipped chips and the 5780 class.  Each write uses a 40 usec
 * settle time (tw32_wait_f) because the register is unsafe to read back
 * immediately after a clock change; the intermediate ALTCLK writes follow
 * the required sequencing -- do not reorder.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	/* Preserve only the CLKRUN bits and low divider field. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Transition through ALTCLK before dropping 44MHZ_CORE. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
626
627 #define PHY_BUSY_LOOPS  5000
628
/* Read PHY register @reg over the MII management interface.
 * MAC auto-polling of the PHY is temporarily switched off around the
 * access (it would contend for the MI port) and restored afterwards.
 * Returns 0 with the result in *val, or -EBUSY if the interface never
 * went idle within PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register number, READ command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI interface reports not-busy. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			/* Re-read to latch the final data value. */
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
677
/* Write @val to PHY register @reg over the MII management interface.
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * skipped (reported as success).  Auto-polling is parked around the
 * access as in tg3_readphy.  Returns 0 on success or -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, WRITE command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI interface reports not-busy. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
726
/* Enable or disable automatic MDI crossover in the PHY.  Only applies to
 * copper 5705+ parts.  The 5906 ephy exposes the control through a shadow
 * register behind MII_TG3_EPHY_TEST; other PHYs use the AUX_CTRL misc
 * shadow (read via RDSEL, written back with WREN set).
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			/* Open the shadow register window, flip the MDIX
			 * bit, then restore the original test register.
			 */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
764
/* Enable the PHY "ethernet@wirespeed" feature via a read-modify-write of
 * an AUX_CTRL shadow register, unless the board disables it.
 * NOTE(review): 0x7007 and bits 15/4 presumably select the wirespeed
 * shadow and its enable bits -- confirm against Broadcom PHY docs.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
777
778 static int tg3_bmcr_reset(struct tg3 *tp)
779 {
780         u32 phy_control;
781         int limit, err;
782
783         /* OK, reset it, and poll the BMCR_RESET bit until it
784          * clears or we time out.
785          */
786         phy_control = BMCR_RESET;
787         err = tg3_writephy(tp, MII_BMCR, phy_control);
788         if (err != 0)
789                 return -EBUSY;
790
791         limit = 5000;
792         while (limit--) {
793                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
794                 if (err != 0)
795                         return -EBUSY;
796
797                 if ((phy_control & BMCR_RESET) == 0) {
798                         udelay(40);
799                         break;
800                 }
801                 udelay(10);
802         }
803         if (limit <= 0)
804                 return -EBUSY;
805
806         return 0;
807 }
808
809 static int tg3_wait_macro_done(struct tg3 *tp)
810 {
811         int limit = 100;
812
813         while (limit--) {
814                 u32 tmp32;
815
816                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
817                         if ((tmp32 & 0x1000) == 0)
818                                 break;
819                 }
820         }
821         if (limit <= 0)
822                 return -EBUSY;
823
824         return 0;
825 }
826
/* Write a known test pattern into the DSP of each of the four PHY
 * channels and read it back to verify it took.  On a macro timeout
 * *resetp is set so the caller resets the PHY before retrying; on a
 * compare mismatch *resetp is left alone.  Returns 0 on success,
 * -EBUSY on any failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* One six-word pattern per channel. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and arm a write (0x0002). */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Launch the write macro; ask for a PHY reset on timeout. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the block and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Words come back as low/high pairs; compare after masking
		 * off the don't-care bits.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the magic recovery writes
				 * (values undocumented here) and fail without
				 * requesting a reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
892
893 static int tg3_phy_reset_chanpat(struct tg3 *tp)
894 {
895         int chan;
896
897         for (chan = 0; chan < 4; chan++) {
898                 int i;
899
900                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
901                              (chan * 0x2000) | 0x0200);
902                 tg3_writephy(tp, 0x16, 0x0002);
903                 for (i = 0; i < 6; i++)
904                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
905                 tg3_writephy(tp, 0x16, 0x0202);
906                 if (tg3_wait_macro_done(tp))
907                         return -EBUSY;
908         }
909
910         return 0;
911 }
912
/* Workaround reset for 5703/5704/5705 PHYs: repeatedly write and
 * verify a DSP test pattern (resetting the PHY between failed
 * attempts), then clear the channel pattern and restore the original
 * settings.  Returns 0 on success or a negative error code.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* A pattern failure may set do_phy_reset so the next pass
		 * starts from a freshly reset PHY.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): err from a failed final pattern attempt is
	 * overwritten here; the restore sequence below runs regardless
	 * of whether the pattern check ever succeeded.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the MII_TG3_CTRL value saved in the loop above.
	 * NOTE(review): phy9_orig is only assigned when that read
	 * succeeded at least once -- confirm every retry path sets it.
	 */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
988
989 static void tg3_link_report(struct tg3 *);
990
991 /* This will reset the tigon3 PHY if there is no valid
992  * link unless the FORCE argument is non-zero.
993  */
/* Reset the PHY and re-apply all chip-specific PHY workarounds
 * (ADC/BER/jitter bugs, jumbo-frame bits, automdix, wirespeed).
 * Returns 0 on success or a negative error code from the reset step.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the embedded PHY out of IDDQ (low-power) mode. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice -- presumably because link status is latched,
	 * so the second read reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* A reset drops the link; report it down before proceeding. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the DSP-training reset sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Per-bug DSP fixups; the magic values are undocumented here. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally (matches original code). */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1097
/* Drive the GRC local-control GPIOs that manage auxiliary (Vaux)
 * power.  On dual-port devices (5704/5714) the peer port's WOL/ASF
 * state is consulted so shared power logic is touched consistently.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only devices flagged as NICs have these GPIOs hooked up. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Keep aux power up when either port needs WOL or ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port is already initialized, leave
			 * the shared GPIOs alone.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Staged write sequence; the ordering presumably
			 * matters to the external power-switch hardware.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power; 5700/5701 are skipped
		 * (they use the GPIO layout handled above).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1193
1194 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1195 {
1196         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1197                 return 1;
1198         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1199                 if (speed != SPEED_10)
1200                         return 1;
1201         } else if (speed == SPEED_10)
1202                 return 1;
1203
1204         return 0;
1205 }
1206
1207 static int tg3_setup_phy(struct tg3 *, int);
1208
1209 #define RESET_KIND_SHUTDOWN     0
1210 #define RESET_KIND_INIT         1
1211 #define RESET_KIND_SUSPEND      2
1212
1213 static void tg3_write_sig_post_reset(struct tg3 *, int);
1214 static int tg3_halt_cpu(struct tg3 *, u32);
1215 static int tg3_nvram_lock(struct tg3 *);
1216 static void tg3_nvram_unlock(struct tg3 *);
1217
/* Put the PHY (or SerDes) into a low-power state ahead of suspend.
 * Some chips skip the final BMCR_PDOWN write because of hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* SerDes: only 5704 needs register changes here. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906: reset the embedded PHY, then put it in IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the LEDs off; the AUX_CTRL value 0x01b2 is a
		 * magic constant undocumented here.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1257
/* Transition the device to the requested PCI power state, preparing
 * WOL, clocks, PHY power and aux-power GPIOs along the way.  Returns
 * 0 on success or -EINVAL for an unsupported target state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of the PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* PME_STATUS is write-one-to-clear per the PCI PM spec; also
	 * clear the power-state field before setting the new state.
	 */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		/* Full power: write the state now and return early. */
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is being put to sleep. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Drop copper links to 10/half autoneg while suspended. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Without ASF, wait up to ~200ms for the firmware
		 * mailbox magic before proceeding.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Keep the MAC receiving so a WOL magic packet can wake
		 * the machine.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock management: slow or gate clocks depending on chip
	 * family and whether 100Mb WOL is required.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply in two stages, waiting after each write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Power down the PHY only if neither WOL nor ASF needs it. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU; only unlock NVRAM if the lock
			 * attempt succeeded.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1481
1482 static void tg3_link_report(struct tg3 *tp)
1483 {
1484         if (!netif_carrier_ok(tp->dev)) {
1485                 if (netif_msg_link(tp))
1486                         printk(KERN_INFO PFX "%s: Link is down.\n",
1487                                tp->dev->name);
1488         } else if (netif_msg_link(tp)) {
1489                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1490                        tp->dev->name,
1491                        (tp->link_config.active_speed == SPEED_1000 ?
1492                         1000 :
1493                         (tp->link_config.active_speed == SPEED_100 ?
1494                          100 : 10)),
1495                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1496                         "full" : "half"));
1497
1498                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1499                        "%s for RX.\n",
1500                        tp->dev->name,
1501                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1502                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1503         }
1504 }
1505
1506 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1507 {
1508         u32 new_tg3_flags = 0;
1509         u32 old_rx_mode = tp->rx_mode;
1510         u32 old_tx_mode = tp->tx_mode;
1511
1512         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1513
1514                 /* Convert 1000BaseX flow control bits to 1000BaseT
1515                  * bits before resolving flow control.
1516                  */
1517                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1518                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1519                                        ADVERTISE_PAUSE_ASYM);
1520                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1521
1522                         if (local_adv & ADVERTISE_1000XPAUSE)
1523                                 local_adv |= ADVERTISE_PAUSE_CAP;
1524                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1525                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1526                         if (remote_adv & LPA_1000XPAUSE)
1527                                 remote_adv |= LPA_PAUSE_CAP;
1528                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1529                                 remote_adv |= LPA_PAUSE_ASYM;
1530                 }
1531
1532                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1533                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1534                                 if (remote_adv & LPA_PAUSE_CAP)
1535                                         new_tg3_flags |=
1536                                                 (TG3_FLAG_RX_PAUSE |
1537                                                 TG3_FLAG_TX_PAUSE);
1538                                 else if (remote_adv & LPA_PAUSE_ASYM)
1539                                         new_tg3_flags |=
1540                                                 (TG3_FLAG_RX_PAUSE);
1541                         } else {
1542                                 if (remote_adv & LPA_PAUSE_CAP)
1543                                         new_tg3_flags |=
1544                                                 (TG3_FLAG_RX_PAUSE |
1545                                                 TG3_FLAG_TX_PAUSE);
1546                         }
1547                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1548                         if ((remote_adv & LPA_PAUSE_CAP) &&
1549                         (remote_adv & LPA_PAUSE_ASYM))
1550                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1551                 }
1552
1553                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1554                 tp->tg3_flags |= new_tg3_flags;
1555         } else {
1556                 new_tg3_flags = tp->tg3_flags;
1557         }
1558
1559         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1560                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1561         else
1562                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1563
1564         if (old_rx_mode != tp->rx_mode) {
1565                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1566         }
1567
1568         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1569                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1570         else
1571                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1572
1573         if (old_tx_mode != tp->tx_mode) {
1574                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1575         }
1576 }
1577
1578 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1579 {
1580         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1581         case MII_TG3_AUX_STAT_10HALF:
1582                 *speed = SPEED_10;
1583                 *duplex = DUPLEX_HALF;
1584                 break;
1585
1586         case MII_TG3_AUX_STAT_10FULL:
1587                 *speed = SPEED_10;
1588                 *duplex = DUPLEX_FULL;
1589                 break;
1590
1591         case MII_TG3_AUX_STAT_100HALF:
1592                 *speed = SPEED_100;
1593                 *duplex = DUPLEX_HALF;
1594                 break;
1595
1596         case MII_TG3_AUX_STAT_100FULL:
1597                 *speed = SPEED_100;
1598                 *duplex = DUPLEX_FULL;
1599                 break;
1600
1601         case MII_TG3_AUX_STAT_1000HALF:
1602                 *speed = SPEED_1000;
1603                 *duplex = DUPLEX_HALF;
1604                 break;
1605
1606         case MII_TG3_AUX_STAT_1000FULL:
1607                 *speed = SPEED_1000;
1608                 *duplex = DUPLEX_FULL;
1609                 break;
1610
1611         default:
1612                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1613                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1614                                  SPEED_10;
1615                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1616                                   DUPLEX_HALF;
1617                         break;
1618                 }
1619                 *speed = SPEED_INVALID;
1620                 *duplex = DUPLEX_INVALID;
1621                 break;
1622         };
1623 }
1624
1625 static void tg3_phy_copper_begin(struct tg3 *tp)
1626 {
1627         u32 new_adv;
1628         int i;
1629
1630         if (tp->link_config.phy_is_low_power) {
1631                 /* Entering low power mode.  Disable gigabit and
1632                  * 100baseT advertisements.
1633                  */
1634                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1635
1636                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1637                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1638                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1639                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1640
1641                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1642         } else if (tp->link_config.speed == SPEED_INVALID) {
1643                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1644                         tp->link_config.advertising &=
1645                                 ~(ADVERTISED_1000baseT_Half |
1646                                   ADVERTISED_1000baseT_Full);
1647
1648                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1649                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1650                         new_adv |= ADVERTISE_10HALF;
1651                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1652                         new_adv |= ADVERTISE_10FULL;
1653                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1654                         new_adv |= ADVERTISE_100HALF;
1655                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1656                         new_adv |= ADVERTISE_100FULL;
1657                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1658
1659                 if (tp->link_config.advertising &
1660                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1661                         new_adv = 0;
1662                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1663                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1664                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1665                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1666                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1667                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1668                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1669                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1670                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1671                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1672                 } else {
1673                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1674                 }
1675         } else {
1676                 /* Asking for a specific link mode. */
1677                 if (tp->link_config.speed == SPEED_1000) {
1678                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1679                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1680
1681                         if (tp->link_config.duplex == DUPLEX_FULL)
1682                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1683                         else
1684                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1685                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1686                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1687                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1688                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1689                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1690                 } else {
1691                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1692
1693                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1694                         if (tp->link_config.speed == SPEED_100) {
1695                                 if (tp->link_config.duplex == DUPLEX_FULL)
1696                                         new_adv |= ADVERTISE_100FULL;
1697                                 else
1698                                         new_adv |= ADVERTISE_100HALF;
1699                         } else {
1700                                 if (tp->link_config.duplex == DUPLEX_FULL)
1701                                         new_adv |= ADVERTISE_10FULL;
1702                                 else
1703                                         new_adv |= ADVERTISE_10HALF;
1704                         }
1705                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1706                 }
1707         }
1708
1709         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1710             tp->link_config.speed != SPEED_INVALID) {
1711                 u32 bmcr, orig_bmcr;
1712
1713                 tp->link_config.active_speed = tp->link_config.speed;
1714                 tp->link_config.active_duplex = tp->link_config.duplex;
1715
1716                 bmcr = 0;
1717                 switch (tp->link_config.speed) {
1718                 default:
1719                 case SPEED_10:
1720                         break;
1721
1722                 case SPEED_100:
1723                         bmcr |= BMCR_SPEED100;
1724                         break;
1725
1726                 case SPEED_1000:
1727                         bmcr |= TG3_BMCR_SPEED1000;
1728                         break;
1729                 };
1730
1731                 if (tp->link_config.duplex == DUPLEX_FULL)
1732                         bmcr |= BMCR_FULLDPLX;
1733
1734                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1735                     (bmcr != orig_bmcr)) {
1736                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1737                         for (i = 0; i < 1500; i++) {
1738                                 u32 tmp;
1739
1740                                 udelay(10);
1741                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1742                                     tg3_readphy(tp, MII_BMSR, &tmp))
1743                                         continue;
1744                                 if (!(tmp & BMSR_LSTATUS)) {
1745                                         udelay(40);
1746                                         break;
1747                                 }
1748                         }
1749                         tg3_writephy(tp, MII_BMCR, bmcr);
1750                         udelay(40);
1751                 }
1752         } else {
1753                 tg3_writephy(tp, MII_BMCR,
1754                              BMCR_ANENABLE | BMCR_ANRESTART);
1755         }
1756 }
1757
1758 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1759 {
1760         int err;
1761
1762         /* Turn off tap power management. */
1763         /* Set Extended packet length bit */
1764         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1765
1766         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1767         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1768
1769         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1770         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1771
1772         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1773         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1774
1775         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1776         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1777
1778         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1779         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1780
1781         udelay(40);
1782
1783         return err;
1784 }
1785
1786 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1787 {
1788         u32 adv_reg, all_mask = 0;
1789
1790         if (mask & ADVERTISED_10baseT_Half)
1791                 all_mask |= ADVERTISE_10HALF;
1792         if (mask & ADVERTISED_10baseT_Full)
1793                 all_mask |= ADVERTISE_10FULL;
1794         if (mask & ADVERTISED_100baseT_Half)
1795                 all_mask |= ADVERTISE_100HALF;
1796         if (mask & ADVERTISED_100baseT_Full)
1797                 all_mask |= ADVERTISE_100FULL;
1798
1799         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1800                 return 0;
1801
1802         if ((adv_reg & all_mask) != all_mask)
1803                 return 0;
1804         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1805                 u32 tg3_ctrl;
1806
1807                 all_mask = 0;
1808                 if (mask & ADVERTISED_1000baseT_Half)
1809                         all_mask |= ADVERTISE_1000HALF;
1810                 if (mask & ADVERTISED_1000baseT_Full)
1811                         all_mask |= ADVERTISE_1000FULL;
1812
1813                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1814                         return 0;
1815
1816                 if ((tg3_ctrl & all_mask) != all_mask)
1817                         return 0;
1818         }
1819         return 1;
1820 }
1821
1822 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1823 {
1824         int current_link_up;
1825         u32 bmsr, dummy;
1826         u16 current_speed;
1827         u8 current_duplex;
1828         int i, err;
1829
1830         tw32(MAC_EVENT, 0);
1831
1832         tw32_f(MAC_STATUS,
1833              (MAC_STATUS_SYNC_CHANGED |
1834               MAC_STATUS_CFG_CHANGED |
1835               MAC_STATUS_MI_COMPLETION |
1836               MAC_STATUS_LNKSTATE_CHANGED));
1837         udelay(40);
1838
1839         tp->mi_mode = MAC_MI_MODE_BASE;
1840         tw32_f(MAC_MI_MODE, tp->mi_mode);
1841         udelay(80);
1842
1843         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1844
1845         /* Some third-party PHYs need to be reset on link going
1846          * down.
1847          */
1848         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1849              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1850              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1851             netif_carrier_ok(tp->dev)) {
1852                 tg3_readphy(tp, MII_BMSR, &bmsr);
1853                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1854                     !(bmsr & BMSR_LSTATUS))
1855                         force_reset = 1;
1856         }
1857         if (force_reset)
1858                 tg3_phy_reset(tp);
1859
1860         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1861                 tg3_readphy(tp, MII_BMSR, &bmsr);
1862                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1863                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1864                         bmsr = 0;
1865
1866                 if (!(bmsr & BMSR_LSTATUS)) {
1867                         err = tg3_init_5401phy_dsp(tp);
1868                         if (err)
1869                                 return err;
1870
1871                         tg3_readphy(tp, MII_BMSR, &bmsr);
1872                         for (i = 0; i < 1000; i++) {
1873                                 udelay(10);
1874                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1875                                     (bmsr & BMSR_LSTATUS)) {
1876                                         udelay(40);
1877                                         break;
1878                                 }
1879                         }
1880
1881                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1882                             !(bmsr & BMSR_LSTATUS) &&
1883                             tp->link_config.active_speed == SPEED_1000) {
1884                                 err = tg3_phy_reset(tp);
1885                                 if (!err)
1886                                         err = tg3_init_5401phy_dsp(tp);
1887                                 if (err)
1888                                         return err;
1889                         }
1890                 }
1891         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1892                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1893                 /* 5701 {A0,B0} CRC bug workaround */
1894                 tg3_writephy(tp, 0x15, 0x0a75);
1895                 tg3_writephy(tp, 0x1c, 0x8c68);
1896                 tg3_writephy(tp, 0x1c, 0x8d68);
1897                 tg3_writephy(tp, 0x1c, 0x8c68);
1898         }
1899
1900         /* Clear pending interrupts... */
1901         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1902         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1903
1904         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1905                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1906         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1907                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1908
1909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1910             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1911                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1912                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1913                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1914                 else
1915                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1916         }
1917
1918         current_link_up = 0;
1919         current_speed = SPEED_INVALID;
1920         current_duplex = DUPLEX_INVALID;
1921
1922         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1923                 u32 val;
1924
1925                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1926                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1927                 if (!(val & (1 << 10))) {
1928                         val |= (1 << 10);
1929                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1930                         goto relink;
1931                 }
1932         }
1933
1934         bmsr = 0;
1935         for (i = 0; i < 100; i++) {
1936                 tg3_readphy(tp, MII_BMSR, &bmsr);
1937                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1938                     (bmsr & BMSR_LSTATUS))
1939                         break;
1940                 udelay(40);
1941         }
1942
1943         if (bmsr & BMSR_LSTATUS) {
1944                 u32 aux_stat, bmcr;
1945
1946                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1947                 for (i = 0; i < 2000; i++) {
1948                         udelay(10);
1949                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1950                             aux_stat)
1951                                 break;
1952                 }
1953
1954                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1955                                              &current_speed,
1956                                              &current_duplex);
1957
1958                 bmcr = 0;
1959                 for (i = 0; i < 200; i++) {
1960                         tg3_readphy(tp, MII_BMCR, &bmcr);
1961                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1962                                 continue;
1963                         if (bmcr && bmcr != 0x7fff)
1964                                 break;
1965                         udelay(10);
1966                 }
1967
1968                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1969                         if (bmcr & BMCR_ANENABLE) {
1970                                 current_link_up = 1;
1971
1972                                 /* Force autoneg restart if we are exiting
1973                                  * low power mode.
1974                                  */
1975                                 if (!tg3_copper_is_advertising_all(tp,
1976                                                 tp->link_config.advertising))
1977                                         current_link_up = 0;
1978                         } else {
1979                                 current_link_up = 0;
1980                         }
1981                 } else {
1982                         if (!(bmcr & BMCR_ANENABLE) &&
1983                             tp->link_config.speed == current_speed &&
1984                             tp->link_config.duplex == current_duplex) {
1985                                 current_link_up = 1;
1986                         } else {
1987                                 current_link_up = 0;
1988                         }
1989                 }
1990
1991                 tp->link_config.active_speed = current_speed;
1992                 tp->link_config.active_duplex = current_duplex;
1993         }
1994
1995         if (current_link_up == 1 &&
1996             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1997             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1998                 u32 local_adv, remote_adv;
1999
2000                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2001                         local_adv = 0;
2002                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2003
2004                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2005                         remote_adv = 0;
2006
2007                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2008
2009                 /* If we are not advertising full pause capability,
2010                  * something is wrong.  Bring the link down and reconfigure.
2011                  */
2012                 if (local_adv != ADVERTISE_PAUSE_CAP) {
2013                         current_link_up = 0;
2014                 } else {
2015                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2016                 }
2017         }
2018 relink:
2019         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2020                 u32 tmp;
2021
2022                 tg3_phy_copper_begin(tp);
2023
2024                 tg3_readphy(tp, MII_BMSR, &tmp);
2025                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2026                     (tmp & BMSR_LSTATUS))
2027                         current_link_up = 1;
2028         }
2029
2030         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2031         if (current_link_up == 1) {
2032                 if (tp->link_config.active_speed == SPEED_100 ||
2033                     tp->link_config.active_speed == SPEED_10)
2034                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2035                 else
2036                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2037         } else
2038                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039
2040         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2041         if (tp->link_config.active_duplex == DUPLEX_HALF)
2042                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2043
2044         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2045                 if (current_link_up == 1 &&
2046                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2047                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2048                 else
2049                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2050         }
2051
2052         /* ??? Without this setting Netgear GA302T PHY does not
2053          * ??? send/receive packets...
2054          */
2055         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2056             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2057                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2058                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2059                 udelay(80);
2060         }
2061
2062         tw32_f(MAC_MODE, tp->mac_mode);
2063         udelay(40);
2064
2065         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2066                 /* Polled via timer. */
2067                 tw32_f(MAC_EVENT, 0);
2068         } else {
2069                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2070         }
2071         udelay(40);
2072
2073         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2074             current_link_up == 1 &&
2075             tp->link_config.active_speed == SPEED_1000 &&
2076             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2077              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2078                 udelay(120);
2079                 tw32_f(MAC_STATUS,
2080                      (MAC_STATUS_SYNC_CHANGED |
2081                       MAC_STATUS_CFG_CHANGED));
2082                 udelay(40);
2083                 tg3_write_mem(tp,
2084                               NIC_SRAM_FIRMWARE_MBOX,
2085                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2086         }
2087
2088         if (current_link_up != netif_carrier_ok(tp->dev)) {
2089                 if (current_link_up)
2090                         netif_carrier_on(tp->dev);
2091                 else
2092                         netif_carrier_off(tp->dev);
2093                 tg3_link_report(tp);
2094         }
2095
2096         return 0;
2097 }
2098
2099 struct tg3_fiber_aneginfo {
2100         int state;
2101 #define ANEG_STATE_UNKNOWN              0
2102 #define ANEG_STATE_AN_ENABLE            1
2103 #define ANEG_STATE_RESTART_INIT         2
2104 #define ANEG_STATE_RESTART              3
2105 #define ANEG_STATE_DISABLE_LINK_OK      4
2106 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2107 #define ANEG_STATE_ABILITY_DETECT       6
2108 #define ANEG_STATE_ACK_DETECT_INIT      7
2109 #define ANEG_STATE_ACK_DETECT           8
2110 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2111 #define ANEG_STATE_COMPLETE_ACK         10
2112 #define ANEG_STATE_IDLE_DETECT_INIT     11
2113 #define ANEG_STATE_IDLE_DETECT          12
2114 #define ANEG_STATE_LINK_OK              13
2115 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2116 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2117
2118         u32 flags;
2119 #define MR_AN_ENABLE            0x00000001
2120 #define MR_RESTART_AN           0x00000002
2121 #define MR_AN_COMPLETE          0x00000004
2122 #define MR_PAGE_RX              0x00000008
2123 #define MR_NP_LOADED            0x00000010
2124 #define MR_TOGGLE_TX            0x00000020
2125 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2126 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2127 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2128 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2129 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2130 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2131 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2132 #define MR_TOGGLE_RX            0x00002000
2133 #define MR_NP_RX                0x00004000
2134
2135 #define MR_LINK_OK              0x80000000
2136
2137         unsigned long link_time, cur_time;
2138
2139         u32 ability_match_cfg;
2140         int ability_match_count;
2141
2142         char ability_match, idle_match, ack_match;
2143
2144         u32 txconfig, rxconfig;
2145 #define ANEG_CFG_NP             0x00000080
2146 #define ANEG_CFG_ACK            0x00000040
2147 #define ANEG_CFG_RF2            0x00000020
2148 #define ANEG_CFG_RF1            0x00000010
2149 #define ANEG_CFG_PS2            0x00000001
2150 #define ANEG_CFG_PS1            0x00008000
2151 #define ANEG_CFG_HD             0x00004000
2152 #define ANEG_CFG_FD             0x00002000
2153 #define ANEG_CFG_INVAL          0x00001f06
2154
2155 };
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     (-1)    /* parenthesized: negative macro value */

/* Ticks the state machine waits for the link to settle. */
#define ANEG_STATE_SETTLE_TIME  10000
2162
2163 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2164                                    struct tg3_fiber_aneginfo *ap)
2165 {
2166         unsigned long delta;
2167         u32 rx_cfg_reg;
2168         int ret;
2169
2170         if (ap->state == ANEG_STATE_UNKNOWN) {
2171                 ap->rxconfig = 0;
2172                 ap->link_time = 0;
2173                 ap->cur_time = 0;
2174                 ap->ability_match_cfg = 0;
2175                 ap->ability_match_count = 0;
2176                 ap->ability_match = 0;
2177                 ap->idle_match = 0;
2178                 ap->ack_match = 0;
2179         }
2180         ap->cur_time++;
2181
2182         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2183                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2184
2185                 if (rx_cfg_reg != ap->ability_match_cfg) {
2186                         ap->ability_match_cfg = rx_cfg_reg;
2187                         ap->ability_match = 0;
2188                         ap->ability_match_count = 0;
2189                 } else {
2190                         if (++ap->ability_match_count > 1) {
2191                                 ap->ability_match = 1;
2192                                 ap->ability_match_cfg = rx_cfg_reg;
2193                         }
2194                 }
2195                 if (rx_cfg_reg & ANEG_CFG_ACK)
2196                         ap->ack_match = 1;
2197                 else
2198                         ap->ack_match = 0;
2199
2200                 ap->idle_match = 0;
2201         } else {
2202                 ap->idle_match = 1;
2203                 ap->ability_match_cfg = 0;
2204                 ap->ability_match_count = 0;
2205                 ap->ability_match = 0;
2206                 ap->ack_match = 0;
2207
2208                 rx_cfg_reg = 0;
2209         }
2210
2211         ap->rxconfig = rx_cfg_reg;
2212         ret = ANEG_OK;
2213
2214         switch(ap->state) {
2215         case ANEG_STATE_UNKNOWN:
2216                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2217                         ap->state = ANEG_STATE_AN_ENABLE;
2218
2219                 /* fallthru */
2220         case ANEG_STATE_AN_ENABLE:
2221                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2222                 if (ap->flags & MR_AN_ENABLE) {
2223                         ap->link_time = 0;
2224                         ap->cur_time = 0;
2225                         ap->ability_match_cfg = 0;
2226                         ap->ability_match_count = 0;
2227                         ap->ability_match = 0;
2228                         ap->idle_match = 0;
2229                         ap->ack_match = 0;
2230
2231                         ap->state = ANEG_STATE_RESTART_INIT;
2232                 } else {
2233                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2234                 }
2235                 break;
2236
2237         case ANEG_STATE_RESTART_INIT:
2238                 ap->link_time = ap->cur_time;
2239                 ap->flags &= ~(MR_NP_LOADED);
2240                 ap->txconfig = 0;
2241                 tw32(MAC_TX_AUTO_NEG, 0);
2242                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2243                 tw32_f(MAC_MODE, tp->mac_mode);
2244                 udelay(40);
2245
2246                 ret = ANEG_TIMER_ENAB;
2247                 ap->state = ANEG_STATE_RESTART;
2248
2249                 /* fallthru */
2250         case ANEG_STATE_RESTART:
2251                 delta = ap->cur_time - ap->link_time;
2252                 if (delta > ANEG_STATE_SETTLE_TIME) {
2253                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2254                 } else {
2255                         ret = ANEG_TIMER_ENAB;
2256                 }
2257                 break;
2258
2259         case ANEG_STATE_DISABLE_LINK_OK:
2260                 ret = ANEG_DONE;
2261                 break;
2262
2263         case ANEG_STATE_ABILITY_DETECT_INIT:
2264                 ap->flags &= ~(MR_TOGGLE_TX);
2265                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2266                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2267                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2268                 tw32_f(MAC_MODE, tp->mac_mode);
2269                 udelay(40);
2270
2271                 ap->state = ANEG_STATE_ABILITY_DETECT;
2272                 break;
2273
2274         case ANEG_STATE_ABILITY_DETECT:
2275                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2276                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2277                 }
2278                 break;
2279
2280         case ANEG_STATE_ACK_DETECT_INIT:
2281                 ap->txconfig |= ANEG_CFG_ACK;
2282                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2283                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2284                 tw32_f(MAC_MODE, tp->mac_mode);
2285                 udelay(40);
2286
2287                 ap->state = ANEG_STATE_ACK_DETECT;
2288
2289                 /* fallthru */
2290         case ANEG_STATE_ACK_DETECT:
2291                 if (ap->ack_match != 0) {
2292                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2293                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2294                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2295                         } else {
2296                                 ap->state = ANEG_STATE_AN_ENABLE;
2297                         }
2298                 } else if (ap->ability_match != 0 &&
2299                            ap->rxconfig == 0) {
2300                         ap->state = ANEG_STATE_AN_ENABLE;
2301                 }
2302                 break;
2303
2304         case ANEG_STATE_COMPLETE_ACK_INIT:
2305                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2306                         ret = ANEG_FAILED;
2307                         break;
2308                 }
2309                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2310                                MR_LP_ADV_HALF_DUPLEX |
2311                                MR_LP_ADV_SYM_PAUSE |
2312                                MR_LP_ADV_ASYM_PAUSE |
2313                                MR_LP_ADV_REMOTE_FAULT1 |
2314                                MR_LP_ADV_REMOTE_FAULT2 |
2315                                MR_LP_ADV_NEXT_PAGE |
2316                                MR_TOGGLE_RX |
2317                                MR_NP_RX);
2318                 if (ap->rxconfig & ANEG_CFG_FD)
2319                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2320                 if (ap->rxconfig & ANEG_CFG_HD)
2321                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2322                 if (ap->rxconfig & ANEG_CFG_PS1)
2323                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2324                 if (ap->rxconfig & ANEG_CFG_PS2)
2325                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2326                 if (ap->rxconfig & ANEG_CFG_RF1)
2327                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2328                 if (ap->rxconfig & ANEG_CFG_RF2)
2329                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2330                 if (ap->rxconfig & ANEG_CFG_NP)
2331                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2332
2333                 ap->link_time = ap->cur_time;
2334
2335                 ap->flags ^= (MR_TOGGLE_TX);
2336                 if (ap->rxconfig & 0x0008)
2337                         ap->flags |= MR_TOGGLE_RX;
2338                 if (ap->rxconfig & ANEG_CFG_NP)
2339                         ap->flags |= MR_NP_RX;
2340                 ap->flags |= MR_PAGE_RX;
2341
2342                 ap->state = ANEG_STATE_COMPLETE_ACK;
2343                 ret = ANEG_TIMER_ENAB;
2344                 break;
2345
2346         case ANEG_STATE_COMPLETE_ACK:
2347                 if (ap->ability_match != 0 &&
2348                     ap->rxconfig == 0) {
2349                         ap->state = ANEG_STATE_AN_ENABLE;
2350                         break;
2351                 }
2352                 delta = ap->cur_time - ap->link_time;
2353                 if (delta > ANEG_STATE_SETTLE_TIME) {
2354                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2355                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2356                         } else {
2357                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2358                                     !(ap->flags & MR_NP_RX)) {
2359                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2360                                 } else {
2361                                         ret = ANEG_FAILED;
2362                                 }
2363                         }
2364                 }
2365                 break;
2366
2367         case ANEG_STATE_IDLE_DETECT_INIT:
2368                 ap->link_time = ap->cur_time;
2369                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2370                 tw32_f(MAC_MODE, tp->mac_mode);
2371                 udelay(40);
2372
2373                 ap->state = ANEG_STATE_IDLE_DETECT;
2374                 ret = ANEG_TIMER_ENAB;
2375                 break;
2376
2377         case ANEG_STATE_IDLE_DETECT:
2378                 if (ap->ability_match != 0 &&
2379                     ap->rxconfig == 0) {
2380                         ap->state = ANEG_STATE_AN_ENABLE;
2381                         break;
2382                 }
2383                 delta = ap->cur_time - ap->link_time;
2384                 if (delta > ANEG_STATE_SETTLE_TIME) {
2385                         /* XXX another gem from the Broadcom driver :( */
2386                         ap->state = ANEG_STATE_LINK_OK;
2387                 }
2388                 break;
2389
2390         case ANEG_STATE_LINK_OK:
2391                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2392                 ret = ANEG_DONE;
2393                 break;
2394
2395         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2396                 /* ??? unimplemented */
2397                 break;
2398
2399         case ANEG_STATE_NEXT_PAGE_WAIT:
2400                 /* ??? unimplemented */
2401                 break;
2402
2403         default:
2404                 ret = ANEG_FAILED;
2405                 break;
2406         };
2407
2408         return ret;
2409 }
2410
2411 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2412 {
2413         int res = 0;
2414         struct tg3_fiber_aneginfo aninfo;
2415         int status = ANEG_FAILED;
2416         unsigned int tick;
2417         u32 tmp;
2418
2419         tw32_f(MAC_TX_AUTO_NEG, 0);
2420
2421         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2422         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2423         udelay(40);
2424
2425         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2426         udelay(40);
2427
2428         memset(&aninfo, 0, sizeof(aninfo));
2429         aninfo.flags |= MR_AN_ENABLE;
2430         aninfo.state = ANEG_STATE_UNKNOWN;
2431         aninfo.cur_time = 0;
2432         tick = 0;
2433         while (++tick < 195000) {
2434                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2435                 if (status == ANEG_DONE || status == ANEG_FAILED)
2436                         break;
2437
2438                 udelay(1);
2439         }
2440
2441         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2442         tw32_f(MAC_MODE, tp->mac_mode);
2443         udelay(40);
2444
2445         *flags = aninfo.flags;
2446
2447         if (status == ANEG_DONE &&
2448             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2449                              MR_LP_ADV_FULL_DUPLEX)))
2450                 res = 1;
2451
2452         return res;
2453 }
2454
/* One-time hardware init sequence for the BCM8002 fiber PHY.
 *
 * The PHY register numbers and values written below are Broadcom
 * magic for this part (PLL lock range, auto-lock/comdet, POR toggle)
 * and are not publicly documented; do not reorder the writes.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	/* 500 * 10us = 5ms busy-wait. */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	/* Toggle register 0x11 (0x0a50 -> 0x0a10); presumably retrains
	 * the comdet/auto-lock logic -- undocumented Broadcom magic.
	 */
	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	/* 15000 * 10us = 150ms busy-wait. */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2504
/* Link setup for fiber parts whose SG DIG block performs 802.3z
 * autonegotiation in hardware.
 *
 * @tp:         device private state
 * @mac_status: MAC_STATUS value sampled by the caller
 *
 * Returns 1 when the link should be considered up, 0 otherwise.
 * Side effects: programs SG_DIG_CTRL / MAC_SERDES_CFG, updates flow
 * control, tp->serdes_counter and the TG3_FLG2_PARALLEL_DETECT flag.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* Every chip except 5704 A0/A1 needs the MAC_SERDES_CFG
	 * workaround applied before SG_DIG_CTRL is touched.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Autoneg is off: if bit 31 (presumably the hardware
		 * autoneg enable -- values are undocumented Broadcom
		 * magic) is still set, turn hardware autoneg back off.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* During a parallel-detect grace period, with PCS sync
		 * and no incoming config words, keep the link up rather
		 * than restarting autoneg.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse bit 30 (presumably autoneg restart), then arm
		 * the desired control word.
		 */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* NOTE(review): sg_dig_status bit 1 appears to mean
		 * "autoneg complete" and bits 19/20 the partner's pause
		 * abilities -- inferred from use, not documented.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete: run the grace counter
			 * down, then fall back to parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				/* Disable hardware autoneg. */
				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* Neither sync nor signal: restart the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2641
2642 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2643 {
2644         int current_link_up = 0;
2645
2646         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2647                 goto out;
2648
2649         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2650                 u32 flags;
2651                 int i;
2652
2653                 if (fiber_autoneg(tp, &flags)) {
2654                         u32 local_adv, remote_adv;
2655
2656                         local_adv = ADVERTISE_PAUSE_CAP;
2657                         remote_adv = 0;
2658                         if (flags & MR_LP_ADV_SYM_PAUSE)
2659                                 remote_adv |= LPA_PAUSE_CAP;
2660                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2661                                 remote_adv |= LPA_PAUSE_ASYM;
2662
2663                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2664
2665                         current_link_up = 1;
2666                 }
2667                 for (i = 0; i < 30; i++) {
2668                         udelay(20);
2669                         tw32_f(MAC_STATUS,
2670                                (MAC_STATUS_SYNC_CHANGED |
2671                                 MAC_STATUS_CFG_CHANGED));
2672                         udelay(40);
2673                         if ((tr32(MAC_STATUS) &
2674                              (MAC_STATUS_SYNC_CHANGED |
2675                               MAC_STATUS_CFG_CHANGED)) == 0)
2676                                 break;
2677                 }
2678
2679                 mac_status = tr32(MAC_STATUS);
2680                 if (current_link_up == 0 &&
2681                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2682                     !(mac_status & MAC_STATUS_RCVD_CFG))
2683                         current_link_up = 1;
2684         } else {
2685                 /* Forcing 1000FD link up. */
2686                 current_link_up = 1;
2687
2688                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2689                 udelay(40);
2690
2691                 tw32_f(MAC_MODE, tp->mac_mode);
2692                 udelay(40);
2693         }
2694
2695 out:
2696         return current_link_up;
2697 }
2698
/* Link setup for TBI/fiber PHYs (BCM8002 or on-chip serdes).
 *
 * @tp:          device private state
 * @force_reset: accepted for signature parity with the other
 *               tg3_setup_*_phy() routines; not referenced here.
 *
 * Always returns 0.  Updates carrier state, LED control and the
 * recorded active speed/duplex as side effects.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the current settings so a link report is only
	 * printed when something actually changed.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: with software autoneg, an existing carrier and a
	 * completed init, skip the full setup when the MAC still shows
	 * a clean synced link with no pending config words.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the latched link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config change bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Sync lost with autoneg wanted and its timeout expired:
		 * pulse SEND_CONFIGS to nudge the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only run 1000/full here; record that and drive
	 * the link LED accordingly.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier changes; otherwise report only when the
	 * pause config, speed or duplex moved.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2810
/* Link setup for serdes parts managed through an MII-style register
 * interface (5714S class, per the ASIC_REV_5714 special cases below).
 *
 * @tp:          device private state
 * @force_reset: nonzero to reset the PHY before configuring it
 *
 * Returns the OR of the tg3_readphy() error codes accumulated along
 * the way.  Updates carrier state and the recorded speed/duplex.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched link-state change bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: the link-status bit is latched, so the
	 * second read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714, trust the MAC's TX status for link instead. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement word. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* If the advertisement changed or autoneg is off,
		 * (re)start autoneg and return; the serdes timeout
		 * machinery will poll for the result.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: only duplex is selectable here. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Empty the advertisement and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample the (latched) link status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * common subset of both advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this reads link_config.active_duplex, which is
	 * only updated to current_duplex further below -- so the MAC
	 * half-duplex bit tracks the previous pass's duplex.  Confirm
	 * whether that ordering is intentional before changing it.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2977
/* Periodic parallel-detection check for MII-style serdes links.
 *
 * While the autoneg timeout (tp->serdes_counter) is running, just
 * count it down.  With no carrier, force 1000/full when the PHY sees
 * signal but no config code words (parallel detection); with a
 * carrier that came from parallel detection, re-enable autoneg once
 * config code words arrive again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice -- presumably a latched register;
			 * matches the BMSR double-read idiom elsewhere.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3035
3036 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3037 {
3038         int err;
3039
3040         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3041                 err = tg3_setup_fiber_phy(tp, force_reset);
3042         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3043                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3044         } else {
3045                 err = tg3_setup_copper_phy(tp, force_reset);
3046         }
3047
3048         if (tp->link_config.active_speed == SPEED_1000 &&
3049             tp->link_config.active_duplex == DUPLEX_HALF)
3050                 tw32(MAC_TX_LENGTHS,
3051                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3052                       (6 << TX_LENGTHS_IPG_SHIFT) |
3053                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3054         else
3055                 tw32(MAC_TX_LENGTHS,
3056                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3057                       (6 << TX_LENGTHS_IPG_SHIFT) |
3058                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3059
3060         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3061                 if (netif_carrier_ok(tp->dev)) {
3062                         tw32(HOSTCC_STAT_COAL_TICKS,
3063                              tp->coal.stats_block_coalesce_usecs);
3064                 } else {
3065                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3066                 }
3067         }
3068
3069         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3070                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3071                 if (!netif_carrier_ok(tp->dev))
3072                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3073                               tp->pwrmgmt_thresh;
3074                 else
3075                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3076                 tw32(PCIE_PWR_MGMT_THRESH, val);
3077         }
3078
3079         return err;
3080 }
3081
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the write-reorder workaround (flushed/indirect mailbox
	 * writes) is already active, the bogus completion cannot be a
	 * re-ordering problem and something else is badly wrong.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Only mark recovery as pending here; tg3_reset_task() checks
	 * this flag and performs the actual workaround plus chip reset.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3102
/* Number of free TX descriptors.  The smp_mb() pairs with the barrier
 * in tg3_tx() so that an updated tx_cons is observed before the
 * queue-stopped state is re-checked (see the comment in tg3_tx()).
 */
static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}
3109
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Reap completed TX descriptors between our software consumer
	 * index and the hardware consumer index from the status block.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at a completed slot means the completion
		 * indexes disagree with our bookkeeping, presumably due
		 * to MMIO write re-ordering; trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor covers the linear head of the skb... */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* ...followed by one descriptor per page fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* A populated slot or running past hw_idx here is
			 * the same inconsistency as above; finish
			 * unmapping before recovering.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the tx lock so we do not race with
		 * tg3_start_xmit() stopping the queue concurrently.
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3177
3178 /* Returns size of skb allocated or < 0 on error.
3179  *
3180  * We only need to fill in the address because the other members
3181  * of the RX descriptor are invariant, see tg3_init_rings.
3182  *
3183  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3184  * posting buffers we only dirty the first cache line of the RX
3185  * descriptor (containing the address).  Whereas for the RX status
3186  * buffers the cpu only reads the last cacheline of the RX descriptor
3187  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3188  */
3189 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3190                             int src_idx, u32 dest_idx_unmasked)
3191 {
3192         struct tg3_rx_buffer_desc *desc;
3193         struct ring_info *map, *src_map;
3194         struct sk_buff *skb;
3195         dma_addr_t mapping;
3196         int skb_size, dest_idx;
3197
3198         src_map = NULL;
3199         switch (opaque_key) {
3200         case RXD_OPAQUE_RING_STD:
3201                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3202                 desc = &tp->rx_std[dest_idx];
3203                 map = &tp->rx_std_buffers[dest_idx];
3204                 if (src_idx >= 0)
3205                         src_map = &tp->rx_std_buffers[src_idx];
3206                 skb_size = tp->rx_pkt_buf_sz;
3207                 break;
3208
3209         case RXD_OPAQUE_RING_JUMBO:
3210                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3211                 desc = &tp->rx_jumbo[dest_idx];
3212                 map = &tp->rx_jumbo_buffers[dest_idx];
3213                 if (src_idx >= 0)
3214                         src_map = &tp->rx_jumbo_buffers[src_idx];
3215                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3216                 break;
3217
3218         default:
3219                 return -EINVAL;
3220         };
3221
3222         /* Do not overwrite any of the map or rp information
3223          * until we are sure we can commit to a new buffer.
3224          *
3225          * Callers depend upon this behavior and assume that
3226          * we leave everything unchanged if we fail.
3227          */
3228         skb = netdev_alloc_skb(tp->dev, skb_size);
3229         if (skb == NULL)
3230                 return -ENOMEM;
3231
3232         skb_reserve(skb, tp->rx_offset);
3233
3234         mapping = pci_map_single(tp->pdev, skb->data,
3235                                  skb_size - tp->rx_offset,
3236                                  PCI_DMA_FROMDEVICE);
3237
3238         map->skb = skb;
3239         pci_unmap_addr_set(map, mapping, mapping);
3240
3241         if (src_map != NULL)
3242                 src_map->skb = NULL;
3243
3244         desc->addr_hi = ((u64)mapping >> 32);
3245         desc->addr_lo = ((u64)mapping & 0xffffffff);
3246
3247         return skb_size;
3248 }
3249
3250 /* We only need to move over in the address because the other
3251  * members of the RX descriptor are invariant.  See notes above
3252  * tg3_alloc_rx_skb for full details.
3253  */
3254 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3255                            int src_idx, u32 dest_idx_unmasked)
3256 {
3257         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3258         struct ring_info *src_map, *dest_map;
3259         int dest_idx;
3260
3261         switch (opaque_key) {
3262         case RXD_OPAQUE_RING_STD:
3263                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3264                 dest_desc = &tp->rx_std[dest_idx];
3265                 dest_map = &tp->rx_std_buffers[dest_idx];
3266                 src_desc = &tp->rx_std[src_idx];
3267                 src_map = &tp->rx_std_buffers[src_idx];
3268                 break;
3269
3270         case RXD_OPAQUE_RING_JUMBO:
3271                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3272                 dest_desc = &tp->rx_jumbo[dest_idx];
3273                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3274                 src_desc = &tp->rx_jumbo[src_idx];
3275                 src_map = &tp->rx_jumbo_buffers[src_idx];
3276                 break;
3277
3278         default:
3279                 return;
3280         };
3281
3282         dest_map->skb = src_map->skb;
3283         pci_unmap_addr_set(dest_map, mapping,
3284                            pci_unmap_addr(src_map, mapping));
3285         dest_desc->addr_hi = src_desc->addr_hi;
3286         dest_desc->addr_lo = src_desc->addr_lo;
3287
3288         src_map->skb = NULL;
3289 }
3290
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack through the VLAN group
 * registered in tp->vlgrp.  Compiled in only when 802.1Q support is
 * configured (see TG3_VLAN_TAG_USED at the top of the file).
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3297
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie names the buffer ring and the index
		 * within it that this completed buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		} else {
			/* Unknown ring; skip without posting a buffer. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			/* Re-post the same buffer to the producer ring. */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: pass the DMA buffer up as-is and
			 * replace it with a freshly allocated one.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			/* Small packet: copy it into a new skb so the
			 * original ring buffer can be reused.
			 */
			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* Reserve 2 bytes so the IP header is aligned. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Accept the hardware checksum only when the chip flags
		 * the packet as TCP/UDP and the checksum is 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Post std buffers to the chip in batches so the
		 * producer ring never runs more than rx_std_max_post
		 * buffers behind.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return received;
}
3477
/* NAPI poll callback: handle link-change events, reap TX completions,
 * receive up to @budget packets, then re-enable interrupts when all
 * work is done.  Returns the number of RX packets processed.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	struct net_device *netdev = tp->dev;
	struct tg3_hw_status *sblk = tp->hw_status;
	int work_done = 0;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before servicing it. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() flagged a mailbox re-ordering problem: stop
		 * polling and let the reset task recover the chip.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev, napi);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done = tg3_rx(tp, budget);

	/* Acknowledge the status block: record the tag in tagged-status
	 * mode, otherwise clear the UPDATED bit.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	if (!tg3_has_work(tp)) {
		netif_rx_complete(netdev, napi);
		tg3_restart_ints(tp);
	}

	return work_done;
}
3529
/* Tell the interrupt handlers (which test tp->irq_sync through
 * tg3_irq_sync()) to stop scheduling NAPI, then wait for any handler
 * already running on another CPU to complete.  Called from
 * tg3_full_lock() when a full shutdown of driver activity is needed.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the irq_sync store visible before waiting on the IRQ. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3539
/* Nonzero while tg3_irq_quiesce() has interrupt handling suspended. */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3544
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3556
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3561
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the caches for the data tg3_poll() touches first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while tg3_irq_quiesce() is in force. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3578
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the caches for the data tg3_poll() touches first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3603
/* Legacy INTx interrupt handler (non-tagged status mode). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line) or the chip is
			 * mid-reset; report it unhandled.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3652
/* Legacy INTx interrupt handler for chips using tagged status mode,
 * where a repeated status_tag means no new events since the last ack.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line) or the chip is
			 * mid-reset; report it unhandled.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3700
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * chip reports INTA asserted, and disable further interrupts
	 * so the test observes exactly one firing.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3715
3716 static int tg3_init_hw(struct tg3 *, int);
3717 static int tg3_halt(struct tg3 *, int, int);
3718
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() needs to run without tp->lock; drop it,
		 * undo the quiesced state (timer, irq_sync, napi), close
		 * the device, then re-take the lock for the caller.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3740
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler directly so netconsole and
 * friends can make progress with interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3749
/* Workqueue handler that resets and re-initializes the chip.
 * Scheduled from the TX timeout path and from tg3_poll() when TX
 * recovery is pending.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Device was closed after the reset was scheduled; nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* tg3_netif_stop() must run without tp->lock held; re-take the
	 * lock afterwards with IRQ synchronization for the reset itself.
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* If tg3_tx_recover() flagged mailbox write re-ordering, switch
	 * to flushed/ordered mailbox writes before bringing the chip
	 * back up.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3790
/* Log a brief snapshot of the MAC and DMA status registers; used from
 * the TX timeout path for debugging.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3798
/* net_device watchdog callback: log the event (if tx_err messages are
 * enabled) and schedule a chip reset from the workqueue.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3811
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	/* True when the low 32 bits are close enough to the boundary
	 * that base + len + 8 wraps.  NOTE(review): the 0xffffdcc0
	 * pre-filter appears to assume a bounded maximum len (it equals
	 * 4G - 9024, i.e. jumbo-frame sized) -- confirm against the
	 * largest buffer this can be called with.
	 */
	return ((base > 0xffffdcc0) &&
		(base + len + 8 < base));
}
3820
3821 /* Test for DMA addresses > 40-bit */
3822 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3823                                           int len)
3824 {
3825 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3826         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3827                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3828         return 0;
3829 #else
3830         return 0;
3831 #endif
3832 }
3833
3834 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3835
3836 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3837 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3838                                        u32 last_plus_one, u32 *start,
3839                                        u32 base_flags, u32 mss)
3840 {
3841         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3842         dma_addr_t new_addr = 0;
3843         u32 entry = *start;
3844         int i, ret = 0;
3845
3846         if (!new_skb) {
3847                 ret = -1;
3848         } else {
3849                 /* New SKB is guaranteed to be linear. */
3850                 entry = *start;
3851                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3852                                           PCI_DMA_TODEVICE);
3853                 /* Make sure new skb does not cross any 4G boundaries.
3854                  * Drop the packet if it does.
3855                  */
3856                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3857                         ret = -1;
3858                         dev_kfree_skb(new_skb);
3859                         new_skb = NULL;
3860                 } else {
3861                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3862                                     base_flags, 1 | (mss << 1));
3863                         *start = NEXT_TX(entry);
3864                 }
3865         }
3866
3867         /* Now clean up the sw ring entries. */
3868         i = 0;
3869         while (entry != last_plus_one) {
3870                 int len;
3871
3872                 if (i == 0)
3873                         len = skb_headlen(skb);
3874                 else
3875                         len = skb_shinfo(skb)->frags[i-1].size;
3876                 pci_unmap_single(tp->pdev,
3877                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3878                                  len, PCI_DMA_TODEVICE);
3879                 if (i == 0) {
3880                         tp->tx_buffers[entry].skb = new_skb;
3881                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3882                 } else {
3883                         tp->tx_buffers[entry].skb = NULL;
3884                 }
3885                 entry = NEXT_TX(entry);
3886                 i++;
3887         }
3888
3889         dev_kfree_skb(skb);
3890
3891         return ret;
3892 }
3893
3894 static void tg3_set_txd(struct tg3 *tp, int entry,
3895                         dma_addr_t mapping, int len, u32 flags,
3896                         u32 mss_and_is_end)
3897 {
3898         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3899         int is_end = (mss_and_is_end & 0x1);
3900         u32 mss = (mss_and_is_end >> 1);
3901         u32 vlan_tag = 0;
3902
3903         if (is_end)
3904                 flags |= TXD_FLAG_END;
3905         if (flags & TXD_FLAG_VLAN) {
3906                 vlan_tag = flags >> 16;
3907                 flags &= 0xffff;
3908         }
3909         vlan_tag |= (mss << TXD_MSS_SHIFT);
3910
3911         txd->addr_hi = ((u64) mapping >> 32);
3912         txd->addr_lo = ((u64) mapping & 0xffffffff);
3913         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3914         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3915 }
3916
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* TSO path: we are about to edit the packet headers, so
                 * make sure they are private to us.  On expansion failure
                 * the packet is dropped.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                /* The header length is folded into the upper bits of the
                 * descriptor mss field (shifted left by 9) for HW_TSO_2.
                 */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Pre-seed the IP header with the per-segment
                         * total length; the chip recomputes the checksum.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Hardware fills in the TCP checksum of each segment. */
                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the head entry keeps a pointer to the skb. */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping to close the race with the TX
                 * completion path freeing entries concurrently.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4035
4036 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4037
4038 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4039  * TSO header is greater than 80 bytes.
4040  */
4041 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4042 {
4043         struct sk_buff *segs, *nskb;
4044
4045         /* Estimate the number of fragments in the worst case */
4046         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4047                 netif_stop_queue(tp->dev);
4048                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4049                         return NETDEV_TX_BUSY;
4050
4051                 netif_wake_queue(tp->dev);
4052         }
4053
4054         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4055         if (unlikely(IS_ERR(segs)))
4056                 goto tg3_tso_bug_end;
4057
4058         do {
4059                 nskb = segs;
4060                 segs = segs->next;
4061                 nskb->next = NULL;
4062                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4063         } while (segs);
4064
4065 tg3_tso_bug_end:
4066         dev_kfree_skb(skb);
4067
4068         return NETDEV_TX_OK;
4069 }
4070
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* TSO: we are going to edit the headers, so they must be
                 * private to us; drop the packet if that fails.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Headers longer than 80 bytes trigger a TSO hardware bug
                 * on affected chips; fall back to software GSO for those.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Pre-seed IP header with the per-segment total length. */
                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* Hardware TSO computes the TCP checksum itself. */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants the pseudo-header checksum
                         * pre-seeded into the TCP header.
                         */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode IP/TCP option word counts; the field position
                 * differs between chip generations.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        /* Track whether any buffer of this packet lands in a region the
         * DMA engine mishandles; if so we rewrite the whole packet below.
         */
        would_hit_hwbug = 0;

        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor of this packet; the
                 * workaround relinearizes it into a single safe buffer.
                 */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping to close the race with the TX
                 * completion path freeing entries concurrently.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4244
4245 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4246                                int new_mtu)
4247 {
4248         dev->mtu = new_mtu;
4249
4250         if (new_mtu > ETH_DATA_LEN) {
4251                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4252                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4253                         ethtool_op_set_tso(dev, 0);
4254                 }
4255                 else
4256                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4257         } else {
4258                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4259                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4260                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4261         }
4262 }
4263
/* net_device change_mtu handler.  Validates the requested MTU and, when
 * the interface is up, restarts the chip with the new configuration.
 * Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        /* Interface is up: quiesce the device, reprogram, restart. */
        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        /* Leave the interface stopped if the hardware restart failed. */
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return err;
}
4297
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard RX ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo RX ring: same, with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: a packet occupies one entry for its linear head plus
         * one per page fragment; only the head entry has txp->skb set.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                /* Unmap the fragment entries that follow the head,
                 * wrapping around the end of the ring if necessary.
                 */
                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4369
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one RX buffer could be
 * allocated.  On partial RX allocation failure the pending counts are
 * shrunk and the ring still comes up.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips use jumbo-sized buffers in the standard ring
         * when running with a jumbo MTU.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        /* Nothing was allocated for i == 0, so there is
                         * nothing to unwind before failing outright.
                         */
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                /* The standard ring was already filled;
                                 * release it before failing.
                                 */
                                if (i == 0) {
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4459
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything tg3_alloc_consistent() set up; safe to call on a
 * partially-initialized tp (each pointer is checked and NULLed).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_jumbo_buffers and tx_buffers alias into this single
         * allocation (see tg3_alloc_consistent), so one kfree suffices.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
4499
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the software bookkeeping arrays and all DMA-coherent ring,
 * status and statistics memory.  Returns 0 on success or -ENOMEM after
 * rolling back whatever had been allocated.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        /* One kzalloc covers the std RX, jumbo RX and TX bookkeeping
         * arrays; rx_jumbo_buffers and tx_buffers alias into it below.
         */
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* Status and statistics blocks are written by the chip; start
         * them out zeroed so stale data is never interpreted.
         */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        /* tg3_free_consistent() tolerates partially-allocated state. */
        tg3_free_consistent(tp);
        return -ENOMEM;
}
4561
4562 #define MAX_WAIT_CNT 1000
4563
4564 /* To stop a block, clear the enable bit and poll till it
4565  * clears.  tp->lock is held.
4566  */
4567 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4568 {
4569         unsigned int i;
4570         u32 val;
4571
4572         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4573                 switch (ofs) {
4574                 case RCVLSC_MODE:
4575                 case DMAC_MODE:
4576                 case MBFREE_MODE:
4577                 case BUFMGR_MODE:
4578                 case MEMARB_MODE:
4579                         /* We can't enable/disable these bits of the
4580                          * 5705/5750, just say success.
4581                          */
4582                         return 0;
4583
4584                 default:
4585                         break;
4586                 };
4587         }
4588
4589         val = tr32(ofs);
4590         val &= ~enable_bit;
4591         tw32_f(ofs, val);
4592
4593         for (i = 0; i < MAX_WAIT_CNT; i++) {
4594                 udelay(100);
4595                 val = tr32(ofs);
4596                 if ((val & enable_bit) == 0)
4597                         break;
4598         }
4599
4600         if (i == MAX_WAIT_CNT && !silent) {
4601                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4602                        "ofs=%lx enable_bit=%x\n",
4603                        ofs, enable_bit);
4604                 return -ENODEV;
4605         }
4606
4607         return 0;
4608 }
4609
/* Quiesce the chip before a reset: disable interrupts, then shut
 * down the receive path, transmit path, host coalescing, DMA and
 * buffer-manager blocks in order.  tp->lock is held.
 *
 * Returns 0 on success, or the OR-accumulated (negative) result of
 * the individual tg3_stop_block() calls on any timeout.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop accepting new frames into the RX MAC first. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Stop the receive-path engine blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Stop the transmit-path engine blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* MAC_TX_MODE is polled directly rather than via
         * tg3_stop_block(); same MAX_WAIT_CNT * 100us budget.
         */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset register (all ones, then zero). */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear the status block and statistics so stale data is not
         * seen after the hardware has been quiesced.
         */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4672
4673 /* tp->lock is held. */
4674 static int tg3_nvram_lock(struct tg3 *tp)
4675 {
4676         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4677                 int i;
4678
4679                 if (tp->nvram_lock_cnt == 0) {
4680                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4681                         for (i = 0; i < 8000; i++) {
4682                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4683                                         break;
4684                                 udelay(20);
4685                         }
4686                         if (i == 8000) {
4687                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4688                                 return -ENODEV;
4689                         }
4690                 }
4691                 tp->nvram_lock_cnt++;
4692         }
4693         return 0;
4694 }
4695
4696 /* tp->lock is held. */
4697 static void tg3_nvram_unlock(struct tg3 *tp)
4698 {
4699         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4700                 if (tp->nvram_lock_cnt > 0)
4701                         tp->nvram_lock_cnt--;
4702                 if (tp->nvram_lock_cnt == 0)
4703                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4704         }
4705 }
4706
4707 /* tp->lock is held. */
4708 static void tg3_enable_nvram_access(struct tg3 *tp)
4709 {
4710         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4711             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4712                 u32 nvaccess = tr32(NVRAM_ACCESS);
4713
4714                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4715         }
4716 }
4717
4718 /* tp->lock is held. */
4719 static void tg3_disable_nvram_access(struct tg3 *tp)
4720 {
4721         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4722             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4723                 u32 nvaccess = tr32(NVRAM_ACCESS);
4724
4725                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4726         }
4727 }
4728
4729 /* tp->lock is held. */
4730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4731 {
4732         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4733                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4734
4735         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4736                 switch (kind) {
4737                 case RESET_KIND_INIT:
4738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4739                                       DRV_STATE_START);
4740                         break;
4741
4742                 case RESET_KIND_SHUTDOWN:
4743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4744                                       DRV_STATE_UNLOAD);
4745                         break;
4746
4747                 case RESET_KIND_SUSPEND:
4748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4749                                       DRV_STATE_SUSPEND);
4750                         break;
4751
4752                 default:
4753                         break;
4754                 };
4755         }
4756 }
4757
4758 /* tp->lock is held. */
4759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4760 {
4761         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4762                 switch (kind) {
4763                 case RESET_KIND_INIT:
4764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4765                                       DRV_STATE_START_DONE);
4766                         break;
4767
4768                 case RESET_KIND_SHUTDOWN:
4769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4770                                       DRV_STATE_UNLOAD_DONE);
4771                         break;
4772
4773                 default:
4774                         break;
4775                 };
4776         }
4777 }
4778
4779 /* tp->lock is held. */
4780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4781 {
4782         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4783                 switch (kind) {
4784                 case RESET_KIND_INIT:
4785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4786                                       DRV_STATE_START);
4787                         break;
4788
4789                 case RESET_KIND_SHUTDOWN:
4790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4791                                       DRV_STATE_UNLOAD);
4792                         break;
4793
4794                 case RESET_KIND_SUSPEND:
4795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4796                                       DRV_STATE_SUSPEND);
4797                         break;
4798
4799                 default:
4800                         break;
4801                 };
4802         }
4803 }
4804
4805 static int tg3_poll_fw(struct tg3 *tp)
4806 {
4807         int i;
4808         u32 val;
4809
4810         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4811                 /* Wait up to 20ms for init done. */
4812                 for (i = 0; i < 200; i++) {
4813                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4814                                 return 0;
4815                         udelay(100);
4816                 }
4817                 return -ENODEV;
4818         }
4819
4820         /* Wait for firmware initialization to complete. */
4821         for (i = 0; i < 100000; i++) {
4822                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4823                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4824                         break;
4825                 udelay(10);
4826         }
4827
4828         /* Chip might not be fitted with firmware.  Some Sun onboard
4829          * parts are configured like that.  So don't signal the timeout
4830          * of the above loop as an error, but do report the lack of
4831          * running firmware once.
4832          */
4833         if (i >= 100000 &&
4834             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4835                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4836
4837                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4838                        tp->dev->name);
4839         }
4840
4841         return 0;
4842 }
4843
4844 /* Save PCI command register before chip reset */
4845 static void tg3_save_pci_state(struct tg3 *tp)
4846 {
4847         u32 val;
4848
4849         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
4850         tp->pci_cmd = val;
4851 }
4852
/* Restore PCI state after chip reset: re-arm indirect register
 * access, the saved PCI command register, PCI-X ordering, and (on
 * 5780-class parts) the MSI enable bit — all of which the core
 * clock reset can clobber.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Write back the command register saved by tg3_save_pci_state(). */
        pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        /* Also re-enable MSI mode in the chip itself. */
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
4901
4902 static void tg3_stop_fw(struct tg3 *);
4903
/* Perform a full GRC core-clock reset of the chip and bring it back
 * to a usable baseline: save/restore PCI state around the reset,
 * flush posted writes, reapply chip-revision workarounds, and wait
 * for firmware via tg3_poll_fw().  tp->lock is held.
 *
 * The exact ordering of the register accesses below is load-bearing;
 * do not reorder.  Returns 0 on success or the tg3_poll_fw() error.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* NOTE(review): 0x7e2c and bit 29 below are undocumented
                 * PCIe-related magic carried over from vendor code.
                 */
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906: flag a driver reset to the VCPU and unhalt it. */
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        /* NOTE(review): config offset 0xc4 / bit 15 is
                         * an undocumented 5750 A0 workaround.
                         */
                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        /* Re-enable the memory arbiter; 5780-class parts preserve
         * the other MEMARB_MODE bits across the reset.
         */
        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Reprogram the MAC port mode according to the PHY type. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5091
5092 /* tp->lock is held. */
5093 static void tg3_stop_fw(struct tg3 *tp)
5094 {
5095         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5096                 u32 val;
5097                 int i;
5098
5099                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5100                 val = tr32(GRC_RX_CPU_EVENT);
5101                 val |= (1 << 14);
5102                 tw32(GRC_RX_CPU_EVENT, val);
5103
5104                 /* Wait for RX cpu to ACK the event.  */
5105                 for (i = 0; i < 100; i++) {
5106                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5107                                 break;
5108                         udelay(1);
5109                 }
5110         }
5111 }
5112
/* Halt the chip: pause firmware, signal the impending reset, quiesce
 * the hardware, reset the chip, and signal completion.
 * tp->lock is held.  Returns the tg3_chip_reset() result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);
        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return err;
}
5133
5134 #define TG3_FW_RELEASE_MAJOR    0x0
5135 #define TG3_FW_RELASE_MINOR     0x0
5136 #define TG3_FW_RELEASE_FIX      0x0
5137 #define TG3_FW_START_ADDR       0x08000000
5138 #define TG3_FW_TEXT_ADDR        0x08000000
5139 #define TG3_FW_TEXT_LEN         0x9c0
5140 #define TG3_FW_RODATA_ADDR      0x080009c0
5141 #define TG3_FW_RODATA_LEN       0x60
5142 #define TG3_FW_DATA_ADDR        0x08000a40
5143 #define TG3_FW_DATA_LEN         0x20
5144 #define TG3_FW_SBSS_ADDR        0x08000a60
5145 #define TG3_FW_SBSS_LEN         0xc
5146 #define TG3_FW_BSS_ADDR         0x08000a70
5147 #define TG3_FW_BSS_LEN          0x10
5148
/* Instruction (.text) segment of the embedded firmware image,
 * TG3_FW_TEXT_LEN bytes as 32-bit words plus one padding word.
 * Opaque binary data covered by the firmware copyright notice at
 * the top of this file — do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
        0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
        0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
        0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
        0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
        0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
        0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
        0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
        0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
        0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
        0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
        0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
        0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
        0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
        0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
        0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
        0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
        0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
        0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
        0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
        0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
        0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
        0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
        0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
        0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
        0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
        0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
        0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
        0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
        0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
        0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
        0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
        0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
        0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
        0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
        0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
        0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
        0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
        0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
        0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
        0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
        0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
        0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
        0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
        0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
        0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
        0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
        0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
        0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
        0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
        0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
        0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
        0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
        0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
        0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
        0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
        0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
        0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
        0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
        0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
        0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
        0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5242
/* Read-only data (.rodata) segment of the embedded firmware image;
 * the words decode to ASCII tags (e.g. "fatalErr", "MainCpuB").
 * Opaque firmware data — do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
        0x00000000
};
5250
5251 #if 0 /* All zeros, don't eat up space with it. */
5252 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5253         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5254         0x00000000, 0x00000000, 0x00000000, 0x00000000
5255 };
5256 #endif
5257
5258 #define RX_CPU_SCRATCH_BASE     0x30000
5259 #define RX_CPU_SCRATCH_SIZE     0x04000
5260 #define TX_CPU_SCRATCH_BASE     0x34000
5261 #define TX_CPU_SCRATCH_SIZE     0x04000
5262
5263 /* tp->lock is held. */
5264 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5265 {
5266         int i;
5267
5268         BUG_ON(offset == TX_CPU_BASE &&
5269             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5270
5271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5272                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5273
5274                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5275                 return 0;
5276         }
5277         if (offset == RX_CPU_BASE) {
5278                 for (i = 0; i < 10000; i++) {
5279                         tw32(offset + CPU_STATE, 0xffffffff);
5280                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5281                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5282                                 break;
5283                 }
5284
5285                 tw32(offset + CPU_STATE, 0xffffffff);
5286                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5287                 udelay(10);
5288         } else {
5289                 for (i = 0; i < 10000; i++) {
5290                         tw32(offset + CPU_STATE, 0xffffffff);
5291                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5292                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5293                                 break;
5294                 }
5295         }
5296
5297         if (i >= 10000) {
5298                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5299                        "and %s CPU\n",
5300                        tp->dev->name,
5301                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5302                 return -ENODEV;
5303         }
5304
5305         /* Clear firmware's nvram arbitration. */
5306         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5307                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5308         return 0;
5309 }
5310
/* Describes one firmware image to be loaded by
 * tg3_load_firmware_cpu(): load addresses, byte lengths, and word
 * arrays for the text, rodata and data segments.
 */
struct fw_info {
        unsigned int text_base;         /* .text load address */
        unsigned int text_len;          /* .text length in bytes */
        const u32 *text_data;           /* .text words; may be NULL (checked by loader) */
        unsigned int rodata_base;       /* .rodata load address */
        unsigned int rodata_len;        /* .rodata length in bytes */
        const u32 *rodata_data;         /* .rodata words; may be NULL */
        unsigned int data_base;         /* .data load address */
        unsigned int data_len;          /* .data length in bytes */
        const u32 *data_data;           /* .data words; may be NULL */
};
5322
/* tp->lock is held. */
/* Halt the CPU at @cpu_base and copy the firmware image described by
 * @info into that CPU's scratch memory at @cpu_scratch_base (of size
 * @cpu_scratch_size bytes).  Returns 0 on success, -EINVAL for an
 * impossible TX-cpu load on 5705+, or the error from tg3_halt_cpu().
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
                                 int cpu_scratch_size, struct fw_info *info)
{
        int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);

        /* 5705+ chips have no TX cpu, so there is nothing to load there. */
        if (cpu_base == TX_CPU_BASE &&
            (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
                       "TX cpu firmware on %s which is 5705.\n",
                       tp->dev->name);
                return -EINVAL;
        }

        /* 5705+ parts write scratch memory directly; older parts must go
         * through the indirect register window.
         */
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;

        /* It is possible that bootcode is still loading at this point.
         * Get the nvram lock first before halting the cpu.
         */
        lock_err = tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
        if (!lock_err)
                tg3_nvram_unlock(tp);
        if (err)
                goto out;

        /* Zero the entire scratch area first. */
        for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
                write_op(tp, cpu_scratch_base + i, 0);
        /* Keep the cpu halted while the sections are written.  Each
         * section lands at (scratch base + low 16 bits of its load
         * address); a NULL section pointer writes zeroes instead.
         */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
        for (i = 0; i < (info->text_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->text_base & 0xffff) +
                              (i * sizeof(u32))),
                         (info->text_data ?
                          info->text_data[i] : 0));
        for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->rodata_base & 0xffff) +
                              (i * sizeof(u32))),
                         (info->rodata_data ?
                          info->rodata_data[i] : 0));
        for (i = 0; i < (info->data_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->data_base & 0xffff) +
                              (i * sizeof(u32))),
                         (info->data_data ?
                          info->data_data[i] : 0));

        err = 0;

out:
        return err;
}
5381
/* tp->lock is held. */
/* Load the 5701 A0 fixup firmware (tg3FwText/tg3FwRodata tables, with a
 * zero-filled .data section) into both the RX and TX cpu scratch areas,
 * then start only the RX cpu by pointing its PC at the firmware entry.
 * Returns 0 on success, a loader error, or -ENODEV if the RX cpu's PC
 * cannot be set.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
        struct fw_info info;
        int err, i;

        info.text_base = TG3_FW_TEXT_ADDR;
        info.text_len = TG3_FW_TEXT_LEN;
        info.text_data = &tg3FwText[0];
        info.rodata_base = TG3_FW_RODATA_ADDR;
        info.rodata_len = TG3_FW_RODATA_LEN;
        info.rodata_data = &tg3FwRodata[0];
        info.data_base = TG3_FW_DATA_ADDR;
        info.data_len = TG3_FW_DATA_LEN;
        info.data_data = NULL;          /* .data is zero-filled */

        err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
                                    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
                                    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        /* Now startup only the RX cpu. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

        /* Verify the PC took; retry up to 5 times, re-halting and
         * re-writing the PC between attempts.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
                        break;
                tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
                tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
                       "to set RX CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
                       TG3_FW_TEXT_ADDR);
                return -ENODEV;
        }
        /* Release the RX cpu from halt (clear CPU_MODE). */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

        return 0;
}
5434
5435
5436 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5437 #define TG3_TSO_FW_RELASE_MINOR         0x6
5438 #define TG3_TSO_FW_RELEASE_FIX          0x0
5439 #define TG3_TSO_FW_START_ADDR           0x08000000
5440 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5441 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5442 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5443 #define TG3_TSO_FW_RODATA_LEN           0x60
5444 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5445 #define TG3_TSO_FW_DATA_LEN             0x30
5446 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5447 #define TG3_TSO_FW_SBSS_LEN             0x2c
5448 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5449 #define TG3_TSO_FW_BSS_LEN              0x894
5450
5451 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5452         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5453         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5454         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5455         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5456         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5457         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5458         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5459         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5460         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5461         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5462         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5463         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5464         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5465         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5466         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5467         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5468         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5469         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5470         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5471         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5472         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5473         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5474         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5475         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5476         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5477         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5478         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5479         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5480         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5481         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5482         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5483         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5484         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5485         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5486         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5487         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5488         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5489         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5490         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5491         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5492         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5493         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5494         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5495         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5496         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5497         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5498         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5499         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5500         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5501         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5502         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5503         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5504         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5505         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5506         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5507         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5508         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5509         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5510         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5511         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5512         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5513         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5514         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5515         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5516         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5517         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5518         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5519         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5520         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5521         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5522         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5523         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5524         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5525         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5526         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5527         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5528         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5529         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5530         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5531         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5532         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5533         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5534         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5535         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5536         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5537         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5538         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5539         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5540         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5541         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5542         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5543         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5544         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5545         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5546         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5547         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5548         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5549         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5550         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5551         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5552         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5553         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5554         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5555         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5556         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5557         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5558         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5559         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5560         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5561         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5562         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5563         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5564         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5565         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5566         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5567         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5568         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5569         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5570         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5571         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5572         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5573         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5574         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5575         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5576         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5577         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5578         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5579         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5580         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5581         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5582         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5583         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5584         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5585         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5586         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5587         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5588         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5589         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5590         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5591         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5592         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5593         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5594         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5595         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5596         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5597         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5598         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5599         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5600         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5601         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5602         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5603         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5604         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5605         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5606         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5607         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5608         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5609         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5610         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5611         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5612         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5613         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5614         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5615         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5616         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5617         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5618         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5619         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5620         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5621         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5622         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5623         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5624         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5625         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5626         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5627         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5628         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5629         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5630         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5631         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5632         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5633         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5634         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5635         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5636         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5637         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5638         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5639         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5640         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5641         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5642         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5643         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5644         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5645         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5646         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5647         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5648         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5649         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5650         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5651         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5652         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5653         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5654         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5655         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5656         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5657         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5658         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5659         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5660         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5661         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5662         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5663         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5664         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5665         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5666         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5667         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5668         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5669         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5670         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5671         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5672         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5673         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5674         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5675         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5676         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5677         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5678         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5679         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5680         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5681         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5682         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5683         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5684         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5685         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5686         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5687         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5688         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5689         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5690         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5691         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5692         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5693         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5694         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5695         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5696         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5697         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5698         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5699         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5700         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5701         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5702         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5703         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5704         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5705         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5706         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5707         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5708         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5709         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5710         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5711         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5712         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5713         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5714         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5715         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5716         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5717         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5718         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5719         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5720         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5721         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5722         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5723         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5724         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5725         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5726         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5727         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5728         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5729         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5730         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5731         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5732         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5733         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5734         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5735         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5736 };
5737
/* TSO firmware .rodata section.  The words decode as big-endian ASCII
 * strings used by the firmware, e.g. "MainCpuB", "MainCpuA",
 * "stkoffldIn", "stkoff**", "SwEvent0" and "fatalErr".
 */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
5745
/* TSO firmware .data section.  The leading words decode as the ASCII
 * version tag "stkoffld_v1.6.0", matching the 1.6.0 release constants
 * above; the remainder is zero-initialized storage.
 */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
5751
5752 /* 5705 needs a special version of the TSO firmware.  */
5753 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5754 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5755 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5756 #define TG3_TSO5_FW_START_ADDR          0x00010000
5757 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5758 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5759 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5760 #define TG3_TSO5_FW_RODATA_LEN          0x50
5761 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5762 #define TG3_TSO5_FW_DATA_LEN            0x20
5763 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5764 #define TG3_TSO5_FW_SBSS_LEN            0x28
5765 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5766 #define TG3_TSO5_FW_BSS_LEN             0x88
5767
/* Text (instruction) segment of the 5705 TSO firmware, copied into
 * NIC SRAM at TG3_TSO5_FW_TEXT_ADDR by tg3_load_tso_firmware() and
 * executed by the on-chip RX CPU.  Opaque binary image -- do not
 * edit; distribution terms are in the firmware copyright notice at
 * the top of this file.  (The words appear to be MIPS machine code,
 * e.g. 0x27bdffe0 looks like "addiu sp,sp,-32" -- unverified.) */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
5926
/* Read-only data segment of the 5705 TSO firmware.  The non-zero
 * words decode (as big-endian ASCII) to the tags "MainCpuB",
 * "MainCpuA", "stkoffld" and "fatalErr".  Opaque firmware data --
 * do not edit. */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5933
/* Initialized-data segment of the 5705 TSO firmware.  The non-zero
 * words decode (as big-endian ASCII) to the version tag
 * "stkoffld_v1.2.0".  Opaque firmware data -- do not edit. */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5938
5939 /* tp->lock is held. */
5940 static int tg3_load_tso_firmware(struct tg3 *tp)
5941 {
5942         struct fw_info info;
5943         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5944         int err, i;
5945
5946         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5947                 return 0;
5948
5949         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5950                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5951                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5952                 info.text_data = &tg3Tso5FwText[0];
5953                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5954                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5955                 info.rodata_data = &tg3Tso5FwRodata[0];
5956                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5957                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5958                 info.data_data = &tg3Tso5FwData[0];
5959                 cpu_base = RX_CPU_BASE;
5960                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5961                 cpu_scratch_size = (info.text_len +
5962                                     info.rodata_len +
5963                                     info.data_len +
5964                                     TG3_TSO5_FW_SBSS_LEN +
5965                                     TG3_TSO5_FW_BSS_LEN);
5966         } else {
5967                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5968                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5969                 info.text_data = &tg3TsoFwText[0];
5970                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5971                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5972                 info.rodata_data = &tg3TsoFwRodata[0];
5973                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5974                 info.data_len = TG3_TSO_FW_DATA_LEN;
5975                 info.data_data = &tg3TsoFwData[0];
5976                 cpu_base = TX_CPU_BASE;
5977                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5978                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5979         }
5980
5981         err = tg3_load_firmware_cpu(tp, cpu_base,
5982                                     cpu_scratch_base, cpu_scratch_size,
5983                                     &info);
5984         if (err)
5985                 return err;
5986
5987         /* Now startup the cpu. */
5988         tw32(cpu_base + CPU_STATE, 0xffffffff);
5989         tw32_f(cpu_base + CPU_PC,    info.text_base);
5990
5991         for (i = 0; i < 5; i++) {
5992                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5993                         break;
5994                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5995                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5996                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5997                 udelay(1000);
5998         }
5999         if (i >= 5) {
6000                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6001                        "to set CPU PC, is %08x should be %08x\n",
6002                        tp->dev->name, tr32(cpu_base + CPU_PC),
6003                        info.text_base);
6004                 return -ENODEV;
6005         }
6006         tw32(cpu_base + CPU_STATE, 0xffffffff);
6007         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6008         return 0;
6009 }
6010
6011
6012 /* tp->lock is held. */
6013 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6014 {
6015         u32 addr_high, addr_low;
6016         int i;
6017
6018         addr_high = ((tp->dev->dev_addr[0] << 8) |
6019                      tp->dev->dev_addr[1]);
6020         addr_low = ((tp->dev->dev_addr[2] << 24) |
6021                     (tp->dev->dev_addr[3] << 16) |
6022                     (tp->dev->dev_addr[4] <<  8) |
6023                     (tp->dev->dev_addr[5] <<  0));
6024         for (i = 0; i < 4; i++) {
6025                 if (i == 1 && skip_mac_1)
6026                         continue;
6027                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6028                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6029         }
6030
6031         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6032             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6033                 for (i = 0; i < 12; i++) {
6034                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6035                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6036                 }
6037         }
6038
6039         addr_high = (tp->dev->dev_addr[0] +
6040                      tp->dev->dev_addr[1] +
6041                      tp->dev->dev_addr[2] +
6042                      tp->dev->dev_addr[3] +
6043                      tp->dev->dev_addr[4] +
6044                      tp->dev->dev_addr[5]) &
6045                 TX_BACKOFF_SEED_MASK;
6046         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6047 }
6048
6049 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6050 {
6051         struct tg3 *tp = netdev_priv(dev);
6052         struct sockaddr *addr = p;
6053         int err = 0, skip_mac_1 = 0;
6054
6055         if (!is_valid_ether_addr(addr->sa_data))
6056                 return -EINVAL;
6057
6058         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6059
6060         if (!netif_running(dev))
6061                 return 0;
6062
6063         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6064                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6065
6066                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6067                 addr0_low = tr32(MAC_ADDR_0_LOW);
6068                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6069                 addr1_low = tr32(MAC_ADDR_1_LOW);
6070
6071                 /* Skip MAC addr 1 if ASF is using it. */
6072                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6073                     !(addr1_high == 0 && addr1_low == 0))
6074                         skip_mac_1 = 1;
6075         }
6076         spin_lock_bh(&tp->lock);
6077         __tg3_set_mac_addr(tp, skip_mac_1);
6078         spin_unlock_bh(&tp->lock);
6079
6080         return err;
6081 }
6082
6083 /* tp->lock is held. */
6084 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6085                            dma_addr_t mapping, u32 maxlen_flags,
6086                            u32 nic_addr)
6087 {
6088         tg3_write_mem(tp,
6089                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6090                       ((u64) mapping >> 32));
6091         tg3_write_mem(tp,
6092                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6093                       ((u64) mapping & 0xffffffff));
6094         tg3_write_mem(tp,
6095                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6096                        maxlen_flags);
6097
6098         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6099                 tg3_write_mem(tp,
6100                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6101                               nic_addr);
6102 }
6103
6104 static void __tg3_set_rx_mode(struct net_device *);
6105 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6106 {
6107         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6108         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6109         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6110         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6111         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6112                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6113                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6114         }
6115         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6116         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6117         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6118                 u32 val = ec->stats_block_coalesce_usecs;
6119
6120                 if (!netif_carrier_ok(tp->dev))
6121                         val = 0;
6122
6123                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6124         }
6125 }
6126
6127 /* tp->lock is held. */
6128 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6129 {
6130         u32 val, rdmac_mode;
6131         int i, err, limit;
6132
6133         tg3_disable_ints(tp);
6134
6135         tg3_stop_fw(tp);
6136
6137         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6138
6139         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6140                 tg3_abort_hw(tp, 1);
6141         }
6142
6143         if (reset_phy)
6144                 tg3_phy_reset(tp);
6145
6146         err = tg3_chip_reset(tp);
6147         if (err)
6148                 return err;
6149
6150         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6151
6152         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6153                 val = tr32(TG3_CPMU_CTRL);
6154                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6155                 tw32(TG3_CPMU_CTRL, val);
6156         }
6157
6158         /* This works around an issue with Athlon chipsets on
6159          * B3 tigon3 silicon.  This bit has no effect on any
6160          * other revision.  But do not set this on PCI Express
6161          * chips and don't even touch the clocks if the CPMU is present.
6162          */
6163         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6164                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6165                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6166                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6167         }
6168
6169         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6170             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6171                 val = tr32(TG3PCI_PCISTATE);
6172                 val |= PCISTATE_RETRY_SAME_DMA;
6173                 tw32(TG3PCI_PCISTATE, val);
6174         }
6175
6176         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6177                 /* Enable some hw fixes.  */
6178                 val = tr32(TG3PCI_MSI_DATA);
6179                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6180                 tw32(TG3PCI_MSI_DATA, val);
6181         }
6182
6183         /* Descriptor ring init may make accesses to the
6184          * NIC SRAM area to setup the TX descriptors, so we
6185          * can only do this after the hardware has been
6186          * successfully reset.
6187          */
6188         err = tg3_init_rings(tp);
6189         if (err)
6190                 return err;
6191
6192         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) {
6193                 /* This value is determined during the probe time DMA
6194                  * engine test, tg3_test_dma.
6195                  */
6196                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6197         }
6198
6199         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6200                           GRC_MODE_4X_NIC_SEND_RINGS |
6201                           GRC_MODE_NO_TX_PHDR_CSUM |
6202                           GRC_MODE_NO_RX_PHDR_CSUM);
6203         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6204
6205         /* Pseudo-header checksum is done by hardware logic and not
6206          * the offload processers, so make the chip do the pseudo-
6207          * header checksums on receive.  For transmit it is more
6208          * convenient to do the pseudo-header checksum in software
6209          * as Linux does that on transmit for us in all cases.
6210          */
6211         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6212
6213         tw32(GRC_MODE,
6214              tp->grc_mode |
6215              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6216
6217         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6218         val = tr32(GRC_MISC_CFG);
6219         val &= ~0xff;
6220         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6221         tw32(GRC_MISC_CFG, val);
6222
6223         /* Initialize MBUF/DESC pool. */
6224         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6225                 /* Do nothing.  */
6226         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6227                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6228                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6229                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6230                 else
6231                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6232                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6233                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6234         }
6235         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6236                 int fw_len;
6237
6238                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6239                           TG3_TSO5_FW_RODATA_LEN +
6240                           TG3_TSO5_FW_DATA_LEN +
6241                           TG3_TSO5_FW_SBSS_LEN +
6242                           TG3_TSO5_FW_BSS_LEN);
6243                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6244                 tw32(BUFMGR_MB_POOL_ADDR,
6245                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6246                 tw32(BUFMGR_MB_POOL_SIZE,
6247                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6248         }
6249
6250         if (tp->dev->mtu <= ETH_DATA_LEN) {
6251                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6252                      tp->bufmgr_config.mbuf_read_dma_low_water);
6253                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6254                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6255                 tw32(BUFMGR_MB_HIGH_WATER,
6256                      tp->bufmgr_config.mbuf_high_water);
6257         } else {
6258                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6259                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6260                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6261                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6262                 tw32(BUFMGR_MB_HIGH_WATER,
6263                      tp->bufmgr_config.mbuf_high_water_jumbo);
6264         }
6265         tw32(BUFMGR_DMA_LOW_WATER,
6266              tp->bufmgr_config.dma_low_water);
6267         tw32(BUFMGR_DMA_HIGH_WATER,
6268              tp->bufmgr_config.dma_high_water);
6269
6270         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6271         for (i = 0; i < 2000; i++) {
6272                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6273                         break;
6274                 udelay(10);
6275         }
6276         if (i >= 2000) {
6277                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6278                        tp->dev->name);
6279                 return -ENODEV;
6280         }
6281
6282         /* Setup replenish threshold. */
6283         val = tp->rx_pending / 8;
6284         if (val == 0)
6285                 val = 1;
6286         else if (val > tp->rx_std_max_post)
6287                 val = tp->rx_std_max_post;
6288         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6289                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6290                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6291
6292                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6293                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6294         }
6295
6296         tw32(RCVBDI_STD_THRESH, val);
6297
6298         /* Initialize TG3_BDINFO's at:
6299          *  RCVDBDI_STD_BD:     standard eth size rx ring
6300          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6301          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6302          *
6303          * like so:
6304          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6305          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6306          *                              ring attribute flags
6307          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6308          *
6309          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6310          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6311          *
6312          * The size of each ring is fixed in the firmware, but the location is
6313          * configurable.
6314          */
6315         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6316              ((u64) tp->rx_std_mapping >> 32));
6317         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6318              ((u64) tp->rx_std_mapping & 0xffffffff));
6319         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6320              NIC_SRAM_RX_BUFFER_DESC);
6321
6322         /* Don't even try to program the JUMBO/MINI buffer descriptor
6323          * configs on 5705.
6324          */
6325         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6326                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6327                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6328         } else {
6329                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6330                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6331
6332                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6333                      BDINFO_FLAGS_DISABLED);
6334
6335                 /* Setup replenish threshold. */
6336                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6337
6338                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6339                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6340                              ((u64) tp->rx_jumbo_mapping >> 32));
6341                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6342                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6343                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6344                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6345                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6346                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6347                 } else {
6348                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6349                              BDINFO_FLAGS_DISABLED);
6350                 }
6351
6352         }
6353
6354         /* There is only one send ring on 5705/5750, no need to explicitly
6355          * disable the others.
6356          */
6357         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6358                 /* Clear out send RCB ring in SRAM. */
6359                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6360                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6361                                       BDINFO_FLAGS_DISABLED);
6362         }
6363
6364         tp->tx_prod = 0;
6365         tp->tx_cons = 0;
6366         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6367         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6368
6369         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6370                        tp->tx_desc_mapping,
6371                        (TG3_TX_RING_SIZE <<
6372                         BDINFO_FLAGS_MAXLEN_SHIFT),
6373                        NIC_SRAM_TX_BUFFER_DESC);
6374
6375         /* There is only one receive return ring on 5705/5750, no need
6376          * to explicitly disable the others.
6377          */
6378         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6379                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6380                      i += TG3_BDINFO_SIZE) {
6381                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6382                                       BDINFO_FLAGS_DISABLED);
6383                 }
6384         }
6385
6386         tp->rx_rcb_ptr = 0;
6387         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6388
6389         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6390                        tp->rx_rcb_mapping,
6391                        (TG3_RX_RCB_RING_SIZE(tp) <<
6392                         BDINFO_FLAGS_MAXLEN_SHIFT),
6393                        0);
6394
6395         tp->rx_std_ptr = tp->rx_pending;
6396         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6397                      tp->rx_std_ptr);
6398
6399         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6400                                                 tp->rx_jumbo_pending : 0;
6401         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6402                      tp->rx_jumbo_ptr);
6403
6404         /* Initialize MAC address and backoff seed. */
6405         __tg3_set_mac_addr(tp, 0);
6406
6407         /* MTU + ethernet header + FCS + optional VLAN tag */
6408         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6409
6410         /* The slot time is changed by tg3_setup_phy if we
6411          * run at gigabit with half duplex.
6412          */
6413         tw32(MAC_TX_LENGTHS,
6414              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6415              (6 << TX_LENGTHS_IPG_SHIFT) |
6416              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6417
6418         /* Receive rules. */
6419         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6420         tw32(RCVLPC_CONFIG, 0x0181);
6421
6422         /* Calculate RDMAC_MODE setting early, we need it to determine
6423          * the RCVLPC_STATE_ENABLE mask.
6424          */
6425         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6426                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6427                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6428                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6429                       RDMAC_MODE_LNGREAD_ENAB);
6430
6431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6432                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6433                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6434                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6435
6436         /* If statement applies to 5705 and 5750 PCI devices only */
6437         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6438              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6439             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6440                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6441                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6442                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6443                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6444                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6445                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6446                 }
6447         }
6448
6449         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6450                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6451
6452         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6453                 rdmac_mode |= (1 << 27);
6454
6455         /* Receive/send statistics. */
6456         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6457                 val = tr32(RCVLPC_STATS_ENABLE);
6458                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6459                 tw32(RCVLPC_STATS_ENABLE, val);
6460         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6461                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6462                 val = tr32(RCVLPC_STATS_ENABLE);
6463                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6464                 tw32(RCVLPC_STATS_ENABLE, val);
6465         } else {
6466                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6467         }
6468         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6469         tw32(SNDDATAI_STATSENAB, 0xffffff);
6470         tw32(SNDDATAI_STATSCTRL,
6471              (SNDDATAI_SCTRL_ENABLE |
6472               SNDDATAI_SCTRL_FASTUPD));
6473
6474         /* Setup host coalescing engine. */
6475         tw32(HOSTCC_MODE, 0);
6476         for (i = 0; i < 2000; i++) {
6477                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6478                         break;
6479                 udelay(10);
6480         }
6481
6482         __tg3_set_coalesce(tp, &tp->coal);
6483
6484         /* set status block DMA address */
6485         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6486              ((u64) tp->status_mapping >> 32));
6487         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6488              ((u64) tp->status_mapping & 0xffffffff));
6489
6490         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6491                 /* Status/statistics block address.  See tg3_timer,
6492                  * the tg3_periodic_fetch_stats call there, and
6493                  * tg3_get_stats to see how this works for 5705/5750 chips.
6494                  */
6495                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6496                      ((u64) tp->stats_mapping >> 32));
6497                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6498                      ((u64) tp->stats_mapping & 0xffffffff));
6499                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6500                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6501         }
6502
6503         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6504
6505         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6506         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6507         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6508                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6509
6510         /* Clear statistics/status block in chip, and status block in ram. */
6511         for (i = NIC_SRAM_STATS_BLK;
6512              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6513              i += sizeof(u32)) {
6514                 tg3_write_mem(tp, i, 0);
6515                 udelay(40);
6516         }
6517         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6518
6519         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6520                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6521                 /* reset to prevent losing 1st rx packet intermittently */
6522                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6523                 udelay(10);
6524         }
6525
6526         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6527                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6528         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6529             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6530             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6531                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6532         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6533         udelay(40);
6534
6535         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6536          * If TG3_FLG2_IS_NIC is zero, we should read the
6537          * register to preserve the GPIO settings for LOMs. The GPIOs,
6538          * whether used as inputs or outputs, are set by boot code after
6539          * reset.
6540          */
6541         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6542                 u32 gpio_mask;
6543
6544                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6545                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6546                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6547
6548                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6549                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6550                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6551
6552                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6553                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6554
6555                 tp->grc_local_ctrl &= ~gpio_mask;
6556                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6557
6558                 /* GPIO1 must be driven high for eeprom write protect */
6559                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6560                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6561                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6562         }
6563         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6564         udelay(100);
6565
6566         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6567         tp->last_tag = 0;
6568
6569         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6570                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6571                 udelay(40);
6572         }
6573
6574         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6575                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6576                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6577                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6578                WDMAC_MODE_LNGREAD_ENAB);
6579
6580         /* If statement applies to 5705 and 5750 PCI devices only */
6581         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6582              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6583             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6584                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6585                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6586                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6587                         /* nothing */
6588                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6589                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6590                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6591                         val |= WDMAC_MODE_RX_ACCEL;
6592                 }
6593         }
6594
6595         /* Enable host coalescing bug fix */
6596         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6597             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6598             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6599                 val |= (1 << 29);
6600
6601         tw32_f(WDMAC_MODE, val);
6602         udelay(40);
6603
6604         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6605                 u16 pcix_cmd;
6606
6607                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6608                                      &pcix_cmd);
6609                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6610                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6611                         pcix_cmd |= PCI_X_CMD_READ_2K;
6612                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6613                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6614                         pcix_cmd |= PCI_X_CMD_READ_2K;
6615                 }
6616                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6617                                       pcix_cmd);
6618         }
6619
6620         tw32_f(RDMAC_MODE, rdmac_mode);
6621         udelay(40);
6622
6623         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6624         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6625                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6626         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6627         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6628         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6629         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6630         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6631         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6632                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6633         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6634         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6635
6636         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6637                 err = tg3_load_5701_a0_firmware_fix(tp);
6638                 if (err)
6639                         return err;
6640         }
6641
6642         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6643                 err = tg3_load_tso_firmware(tp);
6644                 if (err)
6645                         return err;
6646         }
6647
6648         tp->tx_mode = TX_MODE_ENABLE;
6649         tw32_f(MAC_TX_MODE, tp->tx_mode);
6650         udelay(100);
6651
6652         tp->rx_mode = RX_MODE_ENABLE;
6653         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6654                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6655
6656         tw32_f(MAC_RX_MODE, tp->rx_mode);
6657         udelay(10);
6658
6659         if (tp->link_config.phy_is_low_power) {
6660                 tp->link_config.phy_is_low_power = 0;
6661                 tp->link_config.speed = tp->link_config.orig_speed;
6662                 tp->link_config.duplex = tp->link_config.orig_duplex;
6663                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6664         }
6665
6666         tp->mi_mode = MAC_MI_MODE_BASE;
6667         tw32_f(MAC_MI_MODE, tp->mi_mode);
6668         udelay(80);
6669
6670         tw32(MAC_LED_CTRL, tp->led_ctrl);
6671
6672         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6673         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6674                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6675                 udelay(10);
6676         }
6677         tw32_f(MAC_RX_MODE, tp->rx_mode);
6678         udelay(10);
6679
6680         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6681                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6682                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6683                         /* Set drive transmission level to 1.2V  */
6684                         /* only if the signal pre-emphasis bit is not set  */
6685                         val = tr32(MAC_SERDES_CFG);
6686                         val &= 0xfffff000;
6687                         val |= 0x880;
6688                         tw32(MAC_SERDES_CFG, val);
6689                 }
6690                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6691                         tw32(MAC_SERDES_CFG, 0x616000);
6692         }
6693
6694         /* Prevent chip from dropping frames when flow control
6695          * is enabled.
6696          */
6697         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6698
6699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6700             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6701                 /* Use hardware link auto-negotiation */
6702                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6703         }
6704
6705         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6706             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6707                 u32 tmp;
6708
6709                 tmp = tr32(SERDES_RX_CTRL);
6710                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6711                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6712                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6713                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6714         }
6715
6716         err = tg3_setup_phy(tp, 0);
6717         if (err)
6718                 return err;
6719
6720         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6721             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6722                 u32 tmp;
6723
6724                 /* Clear CRC stats. */
6725                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6726                         tg3_writephy(tp, MII_TG3_TEST1,
6727                                      tmp | MII_TG3_TEST1_CRC_EN);
6728                         tg3_readphy(tp, 0x14, &tmp);
6729                 }
6730         }
6731
6732         __tg3_set_rx_mode(tp->dev);
6733
6734         /* Initialize receive rules. */
6735         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6736         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6737         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6738         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6739
6740         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6741             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6742                 limit = 8;
6743         else
6744                 limit = 16;
6745         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6746                 limit -= 4;
6747         switch (limit) {
6748         case 16:
6749                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6750         case 15:
6751                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6752         case 14:
6753                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6754         case 13:
6755                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6756         case 12:
6757                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6758         case 11:
6759                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6760         case 10:
6761                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6762         case 9:
6763                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6764         case 8:
6765                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6766         case 7:
6767                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6768         case 6:
6769                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6770         case 5:
6771                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6772         case 4:
6773                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6774         case 3:
6775                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6776         case 2:
6777         case 1:
6778
6779         default:
6780                 break;
6781         };
6782
6783         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6784
6785         return 0;
6786 }
6787
6788 /* Called at device open time to get the chip ready for
6789  * packet processing.  Invoked with tp->lock held.
6790  */
6791 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6792 {
6793         int err;
6794
6795         /* Force the chip into D0. */
6796         err = tg3_set_power_state(tp, PCI_D0);
6797         if (err)
6798                 goto out;
6799
6800         tg3_switch_clocks(tp);
6801
6802         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6803
6804         err = tg3_reset_hw(tp, reset_phy);
6805
6806 out:
6807         return err;
6808 }
6809
/* Read the 32-bit hardware counter register REG (via tr32, which uses
 * the enclosing scope's tp) and add its value into the 64-bit statistic
 * PSTAT, kept as .low/.high 32-bit halves.  The post-add comparison
 * detects unsigned wraparound of the low half and carries it into the
 * high half, so the accumulated total stays monotonic.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
6816
/* Fold the chip's 32-bit statistics counters into the 64-bit software
 * accumulators in tp->hw_stats (see TG3_STAT_ADD32 for the carry
 * handling).  Does nothing while the carrier is off.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        /* MAC transmit statistics. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* MAC receive statistics. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* RCVLPC (receive list placement) discard/error counters. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6857
/* Driver watchdog timer, re-armed every tp->timer_offset jiffies.
 * Handles the non-tagged-status interrupt race workaround on every
 * tick, link/statistics polling once per second (timer_counter), and
 * the ASF firmware heartbeat (asf_counter).  Work runs under tp->lock;
 * everything is skipped while tp->irq_sync is set.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* An interrupt-sync quiesce is in progress: do no work at all,
         * just re-arm and try again on the next tick.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block shows pending work: force an
                         * interrupt so the ISR picks it up.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Nothing flagged: ask the coalescing engine to
                         * DMA a fresh status block to the host now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                /* Write DMA engine no longer enabled: flag that the
                 * timer must be restarted and hand the recovery off to
                 * reset_task in process context.
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        /* Poll MAC_STATUS for a link change (either the
                         * MI interrupt bit or the link-state-changed
                         * bit, depending on configuration) and re-run
                         * PHY setup when one is seen.
                         */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Carrier up but link state changed, or carrier
                         * down while the SERDES reports sync/signal:
                         * either way the PHY needs to be set up again.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                /* Briefly clear the MAC port-mode bits,
                                 * then restore them, before re-running
                                 * PHY setup.
                                 */
                                if (!tp->serdes_counter) {
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        /* NOTE(review): bit 14 kicks the RX CPU event to
                         * notify the firmware; no symbolic name is
                         * visible here — confirm against tg3.h.
                         */
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= (1 << 14);
                        tw32(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
6977
6978 static int tg3_request_irq(struct tg3 *tp)
6979 {
6980         irq_handler_t fn;
6981         unsigned long flags;
6982         struct net_device *dev = tp->dev;
6983
6984         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6985                 fn = tg3_msi;
6986                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6987                         fn = tg3_msi_1shot;
6988                 flags = IRQF_SAMPLE_RANDOM;
6989         } else {
6990                 fn = tg3_interrupt;
6991                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6992                         fn = tg3_interrupt_tagged;
6993                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6994         }
6995         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6996 }
6997
6998 static int tg3_test_interrupt(struct tg3 *tp)
6999 {
7000         struct net_device *dev = tp->dev;
7001         int err, i, intr_ok = 0;
7002
7003         if (!netif_running(dev))
7004                 return -ENODEV;
7005
7006         tg3_disable_ints(tp);
7007
7008         free_irq(tp->pdev->irq, dev);
7009
7010         err = request_irq(tp->pdev->irq, tg3_test_isr,
7011                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7012         if (err)
7013                 return err;
7014
7015         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7016         tg3_enable_ints(tp);
7017
7018         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7019                HOSTCC_MODE_NOW);
7020
7021         for (i = 0; i < 5; i++) {
7022                 u32 int_mbox, misc_host_ctrl;
7023
7024                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7025                                         TG3_64BIT_REG_LOW);
7026                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7027
7028                 if ((int_mbox != 0) ||
7029                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7030                         intr_ok = 1;
7031                         break;
7032                 }
7033
7034                 msleep(10);
7035         }
7036
7037         tg3_disable_ints(tp);
7038
7039         free_irq(tp->pdev->irq, dev);
7040
7041         err = tg3_request_irq(tp);
7042
7043         if (err)
7044                 return err;
7045
7046         if (intr_ok)
7047                 return 0;
7048
7049         return -EIO;
7050 }
7051
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other negative errno means the device is
 * left without a usable interrupt setup.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test unless we are actually using MSI. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word (re-enables SERR if it
         * was on before).
         */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        /* Tear down the MSI vector and fall back to legacy INTx. */
        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* If re-init failed, release the freshly requested IRQ before
         * propagating the error.
         */
        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
7112
/* netdev ->open() hook: power up the chip, allocate DMA memory and an
 * IRQ (preferring MSI where supported), program the hardware, verify
 * MSI delivery, then start the periodic timer and enable interrupts.
 * Every failure path unwinds exactly what was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        tg3_full_lock(tp, 0);

        /* Bring the device to full power before touching it. */
        err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                tg3_full_unlock(tp);
                return err;
        }

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        /* Hardware bug - MSI won't work if INTX disabled. */
                        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                                pci_intx(tp->pdev, 1);

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* Undo MSI enable and DMA allocation from above. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* With tagged status the periodic timer can run at 1 Hz;
                 * otherwise it runs at 10 Hz.
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                /* Timer is armed later with add_timer(), after the MSI
                 * test has passed.
                 */
                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* Verify the chip really delivers MSI; tg3_test_msi()
                 * falls back to INTx itself, returning nonzero only when
                 * neither mode could be made to work.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_full_lock(tp, 0);

        /* Everything is up: arm the timer, mark init complete and let
         * the chip raise interrupts.
         */
        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
7252
#if 0
/* Debug-only helper, compiled out by default.  Dumps the PCI status,
 * every major hardware control block, the TG3_BDINFO structures, the
 * host status/statistics blocks, mailboxes and a handful of NIC-side
 * TX/RX descriptors to the kernel log via printk().
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
#endif
7480
7481 static struct net_device_stats *tg3_get_stats(struct net_device *);
7482 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7483
/* netdev ->stop() hook: quiesce NAPI, work and the periodic timer,
 * halt the chip, release the IRQ/MSI, snapshot statistics, free DMA
 * memory and drop the device into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Stop all async activity before touching the hardware. */
        napi_disable(&tp->napi);
        cancel_work_sync(&tp->reset_task);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Snapshot the final counters before the statistics memory is
         * freed; tg3_get_stats()/tg3_get_estats() return these saved
         * values while the device is down.
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
7527
7528 static inline unsigned long get_stat64(tg3_stat64_t *val)
7529 {
7530         unsigned long ret;
7531
7532 #if (BITS_PER_LONG == 32)
7533         ret = val->low;
7534 #else
7535         ret = ((u64)val->high << 32) | ((u64)val->low);
7536 #endif
7537         return ret;
7538 }
7539
/* Return the cumulative receive CRC error count.
 *
 * On 5700/5701 copper (non-SerDes) devices the count is read from the
 * PHY: CRC counting is enabled via MII_TG3_TEST1 and the count read
 * from PHY register 0x14, then accumulated into tp->phy_crc_errors.
 * All other devices use the MAC's rx_fcs_errors hardware statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                /* PHY access must be serialized against other users. */
                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;        /* PHY read failed; add nothing */
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
7565
/* ESTAT_ADD(member): current ethtool stat = pre-close snapshot plus
 * the live hardware counter accumulated since the device was opened.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* Build the ethtool statistics block.  When the device is down (the
 * hardware statistics memory has been freed), the snapshot saved at
 * close time is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
7657
7658 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7659 {
7660         struct tg3 *tp = netdev_priv(dev);
7661         struct net_device_stats *stats = &tp->net_stats;
7662         struct net_device_stats *old_stats = &tp->net_stats_prev;
7663         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7664
7665         if (!hw_stats)
7666                 return old_stats;
7667
7668         stats->rx_packets = old_stats->rx_packets +
7669                 get_stat64(&hw_stats->rx_ucast_packets) +
7670                 get_stat64(&hw_stats->rx_mcast_packets) +
7671                 get_stat64(&hw_stats->rx_bcast_packets);
7672
7673         stats->tx_packets = old_stats->tx_packets +
7674                 get_stat64(&hw_stats->tx_ucast_packets) +
7675                 get_stat64(&hw_stats->tx_mcast_packets) +
7676                 get_stat64(&hw_stats->tx_bcast_packets);
7677
7678         stats->rx_bytes = old_stats->rx_bytes +
7679                 get_stat64(&hw_stats->rx_octets);
7680         stats->tx_bytes = old_stats->tx_bytes +
7681                 get_stat64(&hw_stats->tx_octets);
7682
7683         stats->rx_errors = old_stats->rx_errors +
7684                 get_stat64(&hw_stats->rx_errors);
7685         stats->tx_errors = old_stats->tx_errors +
7686                 get_stat64(&hw_stats->tx_errors) +
7687                 get_stat64(&hw_stats->tx_mac_errors) +
7688                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7689                 get_stat64(&hw_stats->tx_discards);
7690
7691         stats->multicast = old_stats->multicast +
7692                 get_stat64(&hw_stats->rx_mcast_packets);
7693         stats->collisions = old_stats->collisions +
7694                 get_stat64(&hw_stats->tx_collisions);
7695
7696         stats->rx_length_errors = old_stats->rx_length_errors +
7697                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7698                 get_stat64(&hw_stats->rx_undersize_packets);
7699
7700         stats->rx_over_errors = old_stats->rx_over_errors +
7701                 get_stat64(&hw_stats->rxbds_empty);
7702         stats->rx_frame_errors = old_stats->rx_frame_errors +
7703                 get_stat64(&hw_stats->rx_align_errors);
7704         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7705                 get_stat64(&hw_stats->tx_discards);
7706         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7707                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7708
7709         stats->rx_crc_errors = old_stats->rx_crc_errors +
7710                 calc_crc_errors(tp);
7711
7712         stats->rx_missed_errors = old_stats->rx_missed_errors +
7713                 get_stat64(&hw_stats->rx_discards);
7714
7715         return stats;
7716 }
7717
7718 static inline u32 calc_crc(unsigned char *buf, int len)
7719 {
7720         u32 reg;
7721         u32 tmp;
7722         int j, k;
7723
7724         reg = 0xffffffff;
7725
7726         for (j = 0; j < len; j++) {
7727                 reg ^= buf[j];
7728
7729                 for (k = 0; k < 8; k++) {
7730                         tmp = reg & 0x01;
7731
7732                         reg >>= 1;
7733
7734                         if (tmp) {
7735                                 reg ^= 0xedb88320;
7736                         }
7737                 }
7738         }
7739
7740         return ~reg;
7741 }
7742
7743 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7744 {
7745         /* accept or reject all multicast frames */
7746         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7747         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7748         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7749         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7750 }
7751
/* Program the MAC receive filters from dev->flags and the multicast
 * list.  Caller must hold the full lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Hash on the inverted low 7 CRC bits: bits 6:5
                         * select one of the four 32-bit hash registers,
                         * bits 4:0 select the bit within it.
                         */
                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX MODE register when something changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
7815
/* netdev ->set_rx_mode() hook: take the full lock and reprogram the
 * receive filters.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
7827
7828 #define TG3_REGDUMP_LEN         (32 * 1024)
7829
7830 static int tg3_get_regs_len(struct net_device *dev)
7831 {
7832         return TG3_REGDUMP_LEN;
7833 }
7834
/* ethtool ->get_regs() hook: copy readable device registers into the
 * caller's TG3_REGDUMP_LEN buffer at their native offsets.  Regions
 * that are not dumped stay zeroed from the memset below.  Skipped
 * entirely while the PHY is in low-power mode.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p;
        struct tg3 *tp = netdev_priv(dev);
        u8 *orig_p = _p;
        int i;

        regs->version = 0;

        memset(p, 0, TG3_REGDUMP_LEN);

        if (tp->link_config.phy_is_low_power)
                return;

        tg3_full_lock(tp, 0);

/* Helper macros: position the output pointer at the register's own
 * offset inside the buffer, then copy a single register or a
 * contiguous range of registers.
 */
#define __GET_REG32(reg)        (*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)                \
do {    p = (u32 *)(orig_p + (base));           \
        for (i = 0; i < len; i += 4)            \
                __GET_REG32((base) + i);        \
} while (0)
#define GET_REG32_1(reg)                        \
do {    p = (u32 *)(orig_p + (reg));            \
        __GET_REG32((reg));                     \
} while (0)

        GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
        GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
        GET_REG32_LOOP(MAC_MODE, 0x4f0);
        GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
        GET_REG32_1(SNDDATAC_MODE);
        GET_REG32_LOOP(SNDBDS_MODE, 0x80);
        GET_REG32_LOOP(SNDBDI_MODE, 0x48);
        GET_REG32_1(SNDBDC_MODE);
        GET_REG32_LOOP(RCVLPC_MODE, 0x20);
        GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
        GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
        GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
        GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
        GET_REG32_1(RCVDCC_MODE);
        GET_REG32_LOOP(RCVBDI_MODE, 0x20);
        GET_REG32_LOOP(RCVCC_MODE, 0x14);
        GET_REG32_LOOP(RCVLSC_MODE, 0x08);
        GET_REG32_1(MBFREE_MODE);
        GET_REG32_LOOP(HOSTCC_MODE, 0x100);
        GET_REG32_LOOP(MEMARB_MODE, 0x10);
        GET_REG32_LOOP(BUFMGR_MODE, 0x58);
        GET_REG32_LOOP(RDMAC_MODE, 0x08);
        GET_REG32_LOOP(WDMAC_MODE, 0x08);
        GET_REG32_1(RX_CPU_MODE);
        GET_REG32_1(RX_CPU_STATE);
        GET_REG32_1(RX_CPU_PGMCTR);
        GET_REG32_1(RX_CPU_HWBKPT);
        GET_REG32_1(TX_CPU_MODE);
        GET_REG32_1(TX_CPU_STATE);
        GET_REG32_1(TX_CPU_PGMCTR);
        GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
        GET_REG32_LOOP(FTQ_RESET, 0x120);
        GET_REG32_LOOP(MSGINT_MODE, 0x0c);
        GET_REG32_1(DMAC_MODE);
        GET_REG32_LOOP(GRC_MODE, 0x4c);
        /* NVRAM registers are only dumped when NVRAM is present. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

        tg3_full_unlock(tp);
}
7907
/* ethtool get_eeprom_len: size of the NVRAM exposed through
 * tg3_get_eeprom()/tg3_set_eeprom().
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
7914
7915 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7916 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7917
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into 'data'.  NVRAM is only readable in aligned
 * 32-bit words, so an unaligned head and tail are extracted from a
 * full word read.  Each word passes through cpu_to_le32 before the
 * byte copy, i.e. the output buffer holds NVRAM words in little-endian
 * byte order regardless of host endianness.  Returns 0 or a negative
 * errno; eeprom->len reports bytes actually copied, even on a partial
 * failure mid-loop.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, val, b_offset, b_count;

        /* NVRAM cannot be accessed while the device is powered down. */
        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;        /* becomes the running copied-byte count */

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(data, ((char*)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes upto the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read(tp, offset + i, &val);
                if (ret) {
                        /* Report the bytes copied so far, then bail. */
                        eeprom->len += i;
                        return ret;
                }
                val = cpu_to_le32(val);
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read(tp, b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(pd, ((char*)&val), b_count);
                eeprom->len += b_count;
        }
        return 0;
}
7979
7980 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7981
/* ethtool set_eeprom: write eeprom->len bytes at eeprom->offset.
 * NVRAM writes are word-granular, so an unaligned start or end is
 * handled by reading the bordering words ('start'/'end') and merging
 * the user data into a kmalloc'd bounce buffer that is word-aligned
 * at both ends.  The aligned fast path writes straight from 'data'.
 * Returns 0 or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u32 offset, len, b_offset, odd_len, start, end;
        u8 *buf;

        /* NVRAM cannot be accessed while the device is powered down. */
        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        /* Userspace must echo back the magic from get_eeprom. */
        if (eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;

        if ((b_offset = (offset & 3))) {
                /* adjustments to start on required 4 byte boundary */
                ret = tg3_nvram_read(tp, offset-b_offset, &start);
                if (ret)
                        return ret;
                start = cpu_to_le32(start);
                len += b_offset;
                offset &= ~3;
                if (len < 4)
                        len = 4;
        }

        odd_len = 0;
        if (len & 3) {
                /* adjustments to end on required 4 byte boundary */
                odd_len = 1;
                len = (len + 3) & ~3;
                ret = tg3_nvram_read(tp, offset+len-4, &end);
                if (ret)
                        return ret;
                end = cpu_to_le32(end);
        }

        buf = data;
        if (b_offset || odd_len) {
                /* Merge preserved border words with the user payload. */
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                if (b_offset)
                        memcpy(buf, &start, 4);
                if (odd_len)
                        memcpy(buf+len-4, &end, 4);
                memcpy(buf + b_offset, data, eeprom->len);
        }

        ret = tg3_nvram_write_block(tp, offset, len, buf);

        /* Free the bounce buffer only when one was allocated. */
        if (buf != data)
                kfree(buf);

        return ret;
}
8040
8041 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8042 {
8043         struct tg3 *tp = netdev_priv(dev);
8044
8045         cmd->supported = (SUPPORTED_Autoneg);
8046
8047         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8048                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8049                                    SUPPORTED_1000baseT_Full);
8050
8051         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8052                 cmd->supported |= (SUPPORTED_100baseT_Half |
8053                                   SUPPORTED_100baseT_Full |
8054                                   SUPPORTED_10baseT_Half |
8055                                   SUPPORTED_10baseT_Full |
8056                                   SUPPORTED_MII);
8057                 cmd->port = PORT_TP;
8058         } else {
8059                 cmd->supported |= SUPPORTED_FIBRE;
8060                 cmd->port = PORT_FIBRE;
8061         }
8062
8063         cmd->advertising = tp->link_config.advertising;
8064         if (netif_running(dev)) {
8065                 cmd->speed = tp->link_config.active_speed;
8066                 cmd->duplex = tp->link_config.active_duplex;
8067         }
8068         cmd->phy_address = PHY_ADDR;
8069         cmd->transceiver = 0;
8070         cmd->autoneg = tp->link_config.autoneg;
8071         cmd->maxtxpkt = 0;
8072         cmd->maxrxpkt = 0;
8073         return 0;
8074 }
8075
8076 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8077 {
8078         struct tg3 *tp = netdev_priv(dev);
8079
8080         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8081                 /* These are the only valid advertisement bits allowed.  */
8082                 if (cmd->autoneg == AUTONEG_ENABLE &&
8083                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8084                                           ADVERTISED_1000baseT_Full |
8085                                           ADVERTISED_Autoneg |
8086                                           ADVERTISED_FIBRE)))
8087                         return -EINVAL;
8088                 /* Fiber can only do SPEED_1000.  */
8089                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8090                          (cmd->speed != SPEED_1000))
8091                         return -EINVAL;
8092         /* Copper cannot force SPEED_1000.  */
8093         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8094                    (cmd->speed == SPEED_1000))
8095                 return -EINVAL;
8096         else if ((cmd->speed == SPEED_1000) &&
8097                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8098                 return -EINVAL;
8099
8100         tg3_full_lock(tp, 0);
8101
8102         tp->link_config.autoneg = cmd->autoneg;
8103         if (cmd->autoneg == AUTONEG_ENABLE) {
8104                 tp->link_config.advertising = cmd->advertising;
8105                 tp->link_config.speed = SPEED_INVALID;
8106                 tp->link_config.duplex = DUPLEX_INVALID;
8107         } else {
8108                 tp->link_config.advertising = 0;
8109                 tp->link_config.speed = cmd->speed;
8110                 tp->link_config.duplex = cmd->duplex;
8111         }
8112
8113         tp->link_config.orig_speed = tp->link_config.speed;
8114         tp->link_config.orig_duplex = tp->link_config.duplex;
8115         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8116
8117         if (netif_running(dev))
8118                 tg3_setup_phy(tp, 1);
8119
8120         tg3_full_unlock(tp);
8121
8122         return 0;
8123 }
8124
/* ethtool get_drvinfo: fill in driver name/version, firmware version
 * and PCI bus address.  The sources are short driver-controlled
 * constants, so plain strcpy into the fixed-size ethtool fields is
 * the style used throughout this era of the driver.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct tg3 *tp = netdev_priv(dev);

        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
        strcpy(info->fw_version, tp->fw_ver);
        strcpy(info->bus_info, pci_name(tp->pdev));
}
8134
8135 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8136 {
8137         struct tg3 *tp = netdev_priv(dev);
8138
8139         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8140                 wol->supported = WAKE_MAGIC;
8141         else
8142                 wol->supported = 0;
8143         wol->wolopts = 0;
8144         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8145                 wol->wolopts = WAKE_MAGIC;
8146         memset(&wol->sopass, 0, sizeof(wol->sopass));
8147 }
8148
/* ethtool set_wol: enable/disable Magic Packet wake.  Any other wake
 * option is rejected, as is Magic Packet on a device without WOL
 * capability.  Only the software flag is toggled here under tp->lock;
 * NOTE(review): presumably the hardware is armed later at power-down
 * time — confirm against the power-state code.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct tg3 *tp = netdev_priv(dev);

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
        if ((wol->wolopts & WAKE_MAGIC) &&
            !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
                return -EINVAL;

        spin_lock_bh(&tp->lock);
        if (wol->wolopts & WAKE_MAGIC)
                tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
        else
                tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
        spin_unlock_bh(&tp->lock);

        return 0;
}
8168
/* ethtool get_msglevel: current debug message-enable bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
}
8174
/* ethtool set_msglevel: set the debug message-enable bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
}
8180
/* ethtool set_tso: toggle TCP segmentation offload.  Chips without
 * TSO capability may only have it off.  On HW_TSO_2 parts (except the
 * 5906) IPv6 TSO is toggled in step with the generic TSO flag, which
 * ethtool_op_set_tso() updates.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
                if (value)
                        return -EINVAL;
                return 0;
        }
        if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
                if (value)
                        dev->features |= NETIF_F_TSO6;
                else
                        dev->features &= ~NETIF_F_TSO6;
        }
        return ethtool_op_set_tso(dev, value);
}
8199
/* ethtool nway_reset: restart autonegotiation.  Returns -EAGAIN when
 * the interface is down and -EINVAL for SerDes PHYs (no MII autoneg
 * register to poke) or when autoneg is not active.
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 bmcr;
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                return -EINVAL;

        spin_lock_bh(&tp->lock);
        r = -EINVAL;
        /* BMCR is read twice and only the second result is checked.
         * NOTE(review): the throwaway first read is presumably there
         * to flush a stale/latched value — confirm against PHY errata
         * before removing it.
         */
        tg3_readphy(tp, MII_BMCR, &bmcr);
        if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
            ((bmcr & BMCR_ANENABLE) ||
             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
                tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                           BMCR_ANENABLE);
                r = 0;
        }
        spin_unlock_bh(&tp->lock);

        return r;
}
8226
8227 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8228 {
8229         struct tg3 *tp = netdev_priv(dev);
8230
8231         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8232         ering->rx_mini_max_pending = 0;
8233         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8234                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8235         else
8236                 ering->rx_jumbo_max_pending = 0;
8237
8238         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8239
8240         ering->rx_pending = tp->rx_pending;
8241         ering->rx_mini_pending = 0;
8242         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8243                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8244         else
8245                 ering->rx_jumbo_pending = 0;
8246
8247         ering->tx_pending = tp->tx_pending;
8248 }
8249
/* ethtool set_ringparam: validate and apply new ring sizes, then
 * restart the hardware if it was running.  The TX ring must exceed
 * MAX_SKB_FRAGS (3x that on chips with the TSO bug workaround) so a
 * maximally fragmented packet still fits.  Chips flagged
 * MAX_RXPEND_64 are clamped to 63 RX descriptors.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;

        if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
            (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                /* Quiesce NAPI/IRQ activity before touching the rings. */
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;
        tp->tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                /* Full halt + re-init so the chip picks up new sizes. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        return err;
}
8289
8290 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8291 {
8292         struct tg3 *tp = netdev_priv(dev);
8293
8294         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8295         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8296         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8297 }
8298
/* ethtool set_pauseparam: update the flow-control flags, then halt
 * and re-initialize the hardware (if running) so the MAC picks up the
 * new pause configuration.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;

        if (netif_running(dev)) {
                /* Quiesce NAPI/IRQ activity before reconfiguring. */
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        if (epause->autoneg)
                tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
        else
                tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
        if (epause->rx_pause)
                tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
        else
                tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
        if (epause->tx_pause)
                tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
        else
                tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        return err;
}
8335
/* ethtool get_rx_csum: 1 if RX checksum offload is enabled. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
8341
/* ethtool set_rx_csum: toggle RX checksum offload.  Chips with broken
 * checksum hardware may only have it disabled; attempting to enable
 * it there returns -EINVAL.
 */
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
                if (data != 0)
                        return -EINVAL;
                return 0;
        }

        spin_lock_bh(&tp->lock);
        if (data)
                tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
        else
                tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
        spin_unlock_bh(&tp->lock);

        return 0;
}
8361
/* ethtool set_tx_csum: toggle TX checksum offload.  Broken-checksum
 * chips may only have it off.  The 5755/5787/5784 families also
 * support IPv6 checksum offload, so their feature bits are set via
 * the ipv6-aware ethtool helper.
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
                if (data != 0)
                        return -EINVAL;
                return 0;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
                ethtool_op_set_tx_ipv6_csum(dev, data);
        else
                ethtool_op_set_tx_csum(dev, data);

        return 0;
}
8381
8382 static int tg3_get_sset_count (struct net_device *dev, int sset)
8383 {
8384         switch (sset) {
8385         case ETH_SS_TEST:
8386                 return TG3_NUM_TEST;
8387         case ETH_SS_STATS:
8388                 return TG3_NUM_STATS;
8389         default:
8390                 return -EOPNOTSUPP;
8391         }
8392 }
8393
/* ethtool get_strings: copy the name table for the requested string
 * set (statistics keys or self-test names) into 'buf'.  The caller
 * sizes 'buf' from tg3_get_sset_count().
 */
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        case ETH_SS_TEST:
                memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
                break;
        default:
                WARN_ON(1);     /* we need a WARN() */
                break;
        }
}
8408
/* ethtool phys_id: identify the adapter by blinking its LEDs for
 * 'data' seconds (2 when data == 0): alternating 500 ms all-LEDs-on /
 * 500 ms off phases, interruptible by a signal.  The original LED
 * control register value is restored on exit.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
        struct tg3 *tp = netdev_priv(dev);
        int i;

        if (!netif_running(tp->dev))
                return -EAGAIN;

        if (data == 0)
                data = 2;

        /* Two half-second phases per second of blinking. */
        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0)
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_1000MBPS_ON |
                                           LED_CTRL_100MBPS_ON |
                                           LED_CTRL_10MBPS_ON |
                                           LED_CTRL_TRAFFIC_OVERRIDE |
                                           LED_CTRL_TRAFFIC_BLINK |
                                           LED_CTRL_TRAFFIC_LED);

                else
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_TRAFFIC_OVERRIDE);

                if (msleep_interruptible(500))
                        break;
        }
        /* Restore normal LED operation. */
        tw32(MAC_LED_CTRL, tp->led_ctrl);
        return 0;
}
8440
/* ethtool get_ethtool_stats: snapshot the driver's accumulated
 * statistics block (refreshed by tg3_get_estats) into the caller's
 * u64 array, which is sized by TG3_NUM_STATS.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct tg3 *tp = netdev_priv(dev);
        memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8447
8448 #define NVRAM_TEST_SIZE 0x100
8449 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8450 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8451 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8452
/* Self-test: verify NVRAM contents.  The magic word at offset 0
 * selects the format:
 *   - TG3_EEPROM_MAGIC: legacy layout, checked via CRC of the
 *     bootstrap area (0x00-0x0f, CRC at 0x10) and the manufacturing
 *     block (0x74-0xfb, CRC at 0xfc);
 *   - selfboot "FW" format: a simple byte checksum over the image
 *     must be zero;
 *   - selfboot "HW" format: each data byte plus its parity bit must
 *     contain an odd number of set bits.
 * Returns 0 on success, -EIO on a failed check or read error,
 * -ENOMEM if the scratch buffer cannot be allocated.
 * NOTE(review): words are stored via cpu_to_le32 but the magic is
 * re-derived with cpu_to_be32 below — net effect matches the original
 * behavior on both endiannesses, but confirm before restructuring.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 *buf, csum, magic;
        int i, j, k, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        /* Decide how much of the NVRAM the checks need. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & 0xe00000) == 0x200000)
                        size = NVRAM_SELFBOOT_FORMAT1_SIZE;
                else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Pull the image into the scratch buffer, word by word. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                u32 val;

                if ((err = tg3_nvram_read(tp, i, &val)) != 0)
                        break;
                buf[j] = cpu_to_le32(val);
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                /* Whole-image byte checksum must come out to zero. */
                for (i = 0; i < size; i++)
                        csum8 += buf8[i];

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* Bytes 0 and 8 each hold 7 parity bits. */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        else if (i == 16) {
                                int l;
                                u8 msk;

                                /* Bytes 16-17 hold the remaining 14 bits. */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each byte + its parity bit must have odd weight. */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != cpu_to_le32(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != cpu_to_le32(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
8565
8566 #define TG3_SERDES_TIMEOUT_SEC  2
8567 #define TG3_COPPER_TIMEOUT_SEC  6
8568
8569 static int tg3_test_link(struct tg3 *tp)
8570 {
8571         int i, max;
8572
8573         if (!netif_running(tp->dev))
8574                 return -ENODEV;
8575
8576         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8577                 max = TG3_SERDES_TIMEOUT_SEC;
8578         else
8579                 max = TG3_COPPER_TIMEOUT_SEC;
8580
8581         for (i = 0; i < max; i++) {
8582                 if (netif_carrier_ok(tp->dev))
8583                         return 0;
8584
8585                 if (msleep_interruptible(1000))
8586                         break;
8587         }
8588
8589         return -EIO;
8590 }
8591
8592 /* Only test the commonly used registers */
8593 static int tg3_test_registers(struct tg3 *tp)
8594 {
8595         int i, is_5705, is_5750;
8596         u32 offset, read_mask, write_mask, val, save_val, read_val;
8597         static struct {
8598                 u16 offset;
8599                 u16 flags;
8600 #define TG3_FL_5705     0x1
8601 #define TG3_FL_NOT_5705 0x2
8602 #define TG3_FL_NOT_5788 0x4
8603 #define TG3_FL_NOT_5750 0x8
8604                 u32 read_mask;
8605                 u32 write_mask;
8606         } reg_tbl[] = {
8607                 /* MAC Control Registers */
8608                 { MAC_MODE, TG3_FL_NOT_5705,
8609                         0x00000000, 0x00ef6f8c },
8610                 { MAC_MODE, TG3_FL_5705,
8611                         0x00000000, 0x01ef6b8c },
8612                 { MAC_STATUS, TG3_FL_NOT_5705,
8613                         0x03800107, 0x00000000 },
8614                 { MAC_STATUS, TG3_FL_5705,
8615                         0x03800100, 0x00000000 },
8616                 { MAC_ADDR_0_HIGH, 0x0000,
8617                         0x00000000, 0x0000ffff },
8618                 { MAC_ADDR_0_LOW, 0x0000,
8619                         0x00000000, 0xffffffff },
8620                 { MAC_RX_MTU_SIZE, 0x0000,
8621                         0x00000000, 0x0000ffff },
8622                 { MAC_TX_MODE, 0x0000,
8623                         0x00000000, 0x00000070 },
8624                 { MAC_TX_LENGTHS, 0x0000,
8625                         0x00000000, 0x00003fff },
8626                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8627                         0x00000000, 0x000007fc },
8628                 { MAC_RX_MODE, TG3_FL_5705,
8629                         0x00000000, 0x000007dc },
8630                 { MAC_HASH_REG_0, 0x0000,
8631                         0x00000000, 0xffffffff },
8632                 { MAC_HASH_REG_1, 0x0000,
8633                         0x00000000, 0xffffffff },
8634                 { MAC_HASH_REG_2, 0x0000,
8635                         0x00000000, 0xffffffff },
8636                 { MAC_HASH_REG_3, 0x0000,
8637                         0x00000000, 0xffffffff },
8638
8639                 /* Receive Data and Receive BD Initiator Control Registers. */
8640                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8641                         0x00000000, 0xffffffff },
8642                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8643                         0x00000000, 0xffffffff },
8644                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8645                         0x00000000, 0x00000003 },
8646                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8647                         0x00000000, 0xffffffff },
8648                 { RCVDBDI_STD_BD+0, 0x0000,
8649                         0x00000000, 0xffffffff },
8650                 { RCVDBDI_STD_BD+4, 0x0000,
8651                         0x00000000, 0xffffffff },
8652                 { RCVDBDI_STD_BD+8, 0x0000,
8653                         0x00000000, 0xffff0002 },
8654                 { RCVDBDI_STD_BD+0xc, 0x0000,
8655                         0x00000000, 0xffffffff },
8656
8657                 /* Receive BD Initiator Control Registers. */
8658                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8659                         0x00000000, 0xffffffff },
8660                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8661                         0x00000000, 0x000003ff },
8662                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8663                         0x00000000, 0xffffffff },
8664
8665                 /* Host Coalescing Control Registers. */
8666                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8667                         0x00000000, 0x00000004 },
8668                 { HOSTCC_MODE, TG3_FL_5705,
8669                         0x00000000, 0x000000f6 },
8670                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8671                         0x00000000, 0xffffffff },
8672                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8673                         0x00000000, 0x000003ff },
8674                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8675                         0x00000000, 0xffffffff },
8676                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8677                         0x00000000, 0x000003ff },
8678                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8679                         0x00000000, 0xffffffff },
8680                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8681                         0x00000000, 0x000000ff },
8682                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8683                         0x00000000, 0xffffffff },
8684                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8685                         0x00000000, 0x000000ff },
8686                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8687                         0x00000000, 0xffffffff },
8688                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8689                         0x00000000, 0xffffffff },
8690                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8691                         0x00000000, 0xffffffff },
8692                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8693                         0x00000000, 0x000000ff },
8694                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8695                         0x00000000, 0xffffffff },
8696                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8697                         0x00000000, 0x000000ff },
8698                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8699                         0x00000000, 0xffffffff },
8700                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8701                         0x00000000, 0xffffffff },
8702                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8703                         0x00000000, 0xffffffff },
8704                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8705                         0x00000000, 0xffffffff },
8706                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8707                         0x00000000, 0xffffffff },
8708                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8709                         0xffffffff, 0x00000000 },
8710                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8711                         0xffffffff, 0x00000000 },
8712
8713                 /* Buffer Manager Control Registers. */
8714                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8715                         0x00000000, 0x007fff80 },
8716                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8717                         0x00000000, 0x007fffff },
8718                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8719                         0x00000000, 0x0000003f },
8720                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8721                         0x00000000, 0x000001ff },
8722                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8723                         0x00000000, 0x000001ff },
8724                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8725                         0xffffffff, 0x00000000 },
8726                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8727                         0xffffffff, 0x00000000 },
8728
8729                 /* Mailbox Registers */
8730                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8731                         0x00000000, 0x000001ff },
8732                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8733                         0x00000000, 0x000001ff },
8734                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8735                         0x00000000, 0x000007ff },
8736                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8737                         0x00000000, 0x000001ff },
8738
8739                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8740         };
8741
8742         is_5705 = is_5750 = 0;
8743         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8744                 is_5705 = 1;
8745                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8746                         is_5750 = 1;
8747         }
8748
8749         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8750                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8751                         continue;
8752
8753                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8754                         continue;
8755
8756                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8757                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8758                         continue;
8759
8760                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8761                         continue;
8762
8763                 offset = (u32) reg_tbl[i].offset;
8764                 read_mask = reg_tbl[i].read_mask;
8765                 write_mask = reg_tbl[i].write_mask;
8766
8767                 /* Save the original register content */
8768                 save_val = tr32(offset);
8769
8770                 /* Determine the read-only value. */
8771                 read_val = save_val & read_mask;
8772
8773                 /* Write zero to the register, then make sure the read-only bits
8774                  * are not changed and the read/write bits are all zeros.
8775                  */
8776                 tw32(offset, 0);
8777
8778                 val = tr32(offset);
8779
8780                 /* Test the read-only and read/write bits. */
8781                 if (((val & read_mask) != read_val) || (val & write_mask))
8782                         goto out;
8783
8784                 /* Write ones to all the bits defined by RdMask and WrMask, then
8785                  * make sure the read-only bits are not changed and the
8786                  * read/write bits are all ones.
8787                  */
8788                 tw32(offset, read_mask | write_mask);
8789
8790                 val = tr32(offset);
8791
8792                 /* Test the read-only bits. */
8793                 if ((val & read_mask) != read_val)
8794                         goto out;
8795
8796                 /* Test the read/write bits. */
8797                 if ((val & write_mask) != write_mask)
8798                         goto out;
8799
8800                 tw32(offset, save_val);
8801         }
8802
8803         return 0;
8804
8805 out:
8806         if (netif_msg_hw(tp))
8807                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
8808                        offset);
8809         tw32(offset, save_val);
8810         return -EIO;
8811 }
8812
8813 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8814 {
8815         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8816         int i;
8817         u32 j;
8818
8819         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8820                 for (j = 0; j < len; j += 4) {
8821                         u32 val;
8822
8823                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8824                         tg3_read_mem(tp, offset + j, &val);
8825                         if (val != test_pattern[i])
8826                                 return -EIO;
8827                 }
8828         }
8829         return 0;
8830 }
8831
8832 static int tg3_test_memory(struct tg3 *tp)
8833 {
8834         static struct mem_entry {
8835                 u32 offset;
8836                 u32 len;
8837         } mem_tbl_570x[] = {
8838                 { 0x00000000, 0x00b50},
8839                 { 0x00002000, 0x1c000},
8840                 { 0xffffffff, 0x00000}
8841         }, mem_tbl_5705[] = {
8842                 { 0x00000100, 0x0000c},
8843                 { 0x00000200, 0x00008},
8844                 { 0x00004000, 0x00800},
8845                 { 0x00006000, 0x01000},
8846                 { 0x00008000, 0x02000},
8847                 { 0x00010000, 0x0e000},
8848                 { 0xffffffff, 0x00000}
8849         }, mem_tbl_5755[] = {
8850                 { 0x00000200, 0x00008},
8851                 { 0x00004000, 0x00800},
8852                 { 0x00006000, 0x00800},
8853                 { 0x00008000, 0x02000},
8854                 { 0x00010000, 0x0c000},
8855                 { 0xffffffff, 0x00000}
8856         }, mem_tbl_5906[] = {
8857                 { 0x00000200, 0x00008},
8858                 { 0x00004000, 0x00400},
8859                 { 0x00006000, 0x00400},
8860                 { 0x00008000, 0x01000},
8861                 { 0x00010000, 0x01000},
8862                 { 0xffffffff, 0x00000}
8863         };
8864         struct mem_entry *mem_tbl;
8865         int err = 0;
8866         int i;
8867
8868         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8869                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8870                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8871                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
8872                         mem_tbl = mem_tbl_5755;
8873                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8874                         mem_tbl = mem_tbl_5906;
8875                 else
8876                         mem_tbl = mem_tbl_5705;
8877         } else
8878                 mem_tbl = mem_tbl_570x;
8879
8880         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8881                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8882                     mem_tbl[i].len)) != 0)
8883                         break;
8884         }
8885
8886         return err;
8887 }
8888
8889 #define TG3_MAC_LOOPBACK        0
8890 #define TG3_PHY_LOOPBACK        1
8891
/* Transmit one self-addressed test frame in loopback and verify it is
 * received back intact.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK (internal MAC loopback) or
 *                 TG3_PHY_LOOPBACK (BMCR loopback at the PHY).
 *
 * The caller must have the device halted/reset into a known state
 * (see tg3_test_loopback()).  Returns 0 on success, -EIO if the frame
 * is not transmitted/received or its payload is corrupted, -ENOMEM on
 * skb allocation failure, -EINVAL for an unknown mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Program the MAC for internal loopback at the port speed
		 * this chip supports (MII for 10/100-only parts).
		 */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: clear shadow-register bit 0x20 of register
			 * 0x1b under EPHY_SHADOW_EN, then restore the test
			 * register, before entering 100 Mb/s loopback.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		/* Disable auto-MDIX while looped back. */
		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700 link-polarity handling differs per PHY. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a max-size (1514 byte) frame: our own MAC as destination,
	 * zeroed src/type, then a counting byte pattern as payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember the rx producer so we can detect our looped frame. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the send mailbox, then read it back to flush the post. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Frame must have been consumed on tx and produced on rx. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the rx descriptor: standard ring, no errors (an odd
	 * nibble indication alone is tolerated), expected length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	/* Sync the rx buffer for CPU access before inspecting it. */
	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the received payload against the counting pattern. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9059
9060 #define TG3_MAC_LOOPBACK_FAILED         1
9061 #define TG3_PHY_LOOPBACK_FAILED         2
9062 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9063                                          TG3_PHY_LOOPBACK_FAILED)
9064
9065 static int tg3_test_loopback(struct tg3 *tp)
9066 {
9067         int err = 0;
9068
9069         if (!netif_running(tp->dev))
9070                 return TG3_LOOPBACK_FAILED;
9071
9072         err = tg3_reset_hw(tp, 1);
9073         if (err)
9074                 return TG3_LOOPBACK_FAILED;
9075
9076         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9077                 err |= TG3_MAC_LOOPBACK_FAILED;
9078         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9079                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9080                         err |= TG3_PHY_LOOPBACK_FAILED;
9081         }
9082
9083         return err;
9084 }
9085
/* ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Fills data[0..5] with per-test results (non-zero = failed) and sets
 * ETH_TEST_FL_FAILED in etest->flags on any failure:
 *   data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 *   data[4] loopback bitmask, data[5] interrupt.
 * The offline tests halt and later restart the hardware, so the
 * halt/lock/restart ordering below must be preserved.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip if it was powered down. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Online tests: safe to run without stopping traffic. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the interface before taking the full lock. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs; hold the NVRAM
		 * lock across the halt so firmware isn't mid-access.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test needs the lock dropped (it waits). */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the hardware back to its pre-test state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Return to low power if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9158
9159 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9160 {
9161         struct mii_ioctl_data *data = if_mii(ifr);
9162         struct tg3 *tp = netdev_priv(dev);
9163         int err;
9164
9165         switch(cmd) {
9166         case SIOCGMIIPHY:
9167                 data->phy_id = PHY_ADDR;
9168
9169                 /* fallthru */
9170         case SIOCGMIIREG: {
9171                 u32 mii_regval;
9172
9173                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9174                         break;                  /* We have no PHY */
9175
9176                 if (tp->link_config.phy_is_low_power)
9177                         return -EAGAIN;
9178
9179                 spin_lock_bh(&tp->lock);
9180                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9181                 spin_unlock_bh(&tp->lock);
9182
9183                 data->val_out = mii_regval;
9184
9185                 return err;
9186         }
9187
9188         case SIOCSMIIREG:
9189                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9190                         break;                  /* We have no PHY */
9191
9192                 if (!capable(CAP_NET_ADMIN))
9193                         return -EPERM;
9194
9195                 if (tp->link_config.phy_is_low_power)
9196                         return -EAGAIN;
9197
9198                 spin_lock_bh(&tp->lock);
9199                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9200                 spin_unlock_bh(&tp->lock);
9201
9202                 return err;
9203
9204         default:
9205                 /* do nothing */
9206                 break;
9207         }
9208         return -EOPNOTSUPP;
9209 }
9210
9211 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * receiver accordingly.  Traffic is stopped around the update so the
 * rx path never observes a half-applied configuration.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
9231 #endif
9232
9233 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9234 {
9235         struct tg3 *tp = netdev_priv(dev);
9236
9237         memcpy(ec, &tp->coal, sizeof(*ec));
9238         return 0;
9239 }
9240
9241 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9242 {
9243         struct tg3 *tp = netdev_priv(dev);
9244         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9245         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9246
9247         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9248                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9249                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9250                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9251                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9252         }
9253
9254         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9255             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9256             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9257             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9258             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9259             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9260             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9261             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9262             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9263             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9264                 return -EINVAL;
9265
9266         /* No rx interrupts will be generated if both are zero */
9267         if ((ec->rx_coalesce_usecs == 0) &&
9268             (ec->rx_max_coalesced_frames == 0))
9269                 return -EINVAL;
9270
9271         /* No tx interrupts will be generated if both are zero */
9272         if ((ec->tx_coalesce_usecs == 0) &&
9273             (ec->tx_max_coalesced_frames == 0))
9274                 return -EINVAL;
9275
9276         /* Only copy relevant parameters, ignore all others. */
9277         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9278         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9279         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9280         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9281         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9282         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9283         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9284         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9285         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9286
9287         if (netif_running(dev)) {
9288                 tg3_full_lock(tp, 0);
9289                 __tg3_set_coalesce(tp, &tp->coal);
9290                 tg3_full_unlock(tp);
9291         }
9292         return 0;
9293 }
9294
/* ethtool method table for the tg3 driver; registered via
 * dev->ethtool_ops at probe time.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9327
9328 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9329 {
9330         u32 cursize, val, magic;
9331
9332         tp->nvram_size = EEPROM_CHIP_SIZE;
9333
9334         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9335                 return;
9336
9337         if ((magic != TG3_EEPROM_MAGIC) &&
9338             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9339             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9340                 return;
9341
9342         /*
9343          * Size the chip by reading offsets at increasing powers of two.
9344          * When we encounter our validation signature, we know the addressing
9345          * has wrapped around, and thus have our chip size.
9346          */
9347         cursize = 0x10;
9348
9349         while (cursize < tp->nvram_size) {
9350                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9351                         return;
9352
9353                 if (val == magic)
9354                         break;
9355
9356                 cursize <<= 1;
9357         }
9358
9359         tp->nvram_size = cursize;
9360 }
9361
9362 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9363 {
9364         u32 val;
9365
9366         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9367                 return;
9368
9369         /* Selfboot format */
9370         if (val != TG3_EEPROM_MAGIC) {
9371                 tg3_get_eeprom_size(tp);
9372                 return;
9373         }
9374
9375         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9376                 if (val != 0) {
9377                         tp->nvram_size = (val >> 16) * 1024;
9378                         return;
9379                 }
9380         }
9381         tp->nvram_size = 0x80000;
9382 }
9383
9384 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9385 {
9386         u32 nvcfg1;
9387
9388         nvcfg1 = tr32(NVRAM_CFG1);
9389         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9390                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9391         }
9392         else {
9393                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9394                 tw32(NVRAM_CFG1, nvcfg1);
9395         }
9396
9397         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9398             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9399                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9400                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9401                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9402                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9403                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9404                                 break;
9405                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9406                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9407                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9408                                 break;
9409                         case FLASH_VENDOR_ATMEL_EEPROM:
9410                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9411                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9412                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9413                                 break;
9414                         case FLASH_VENDOR_ST:
9415                                 tp->nvram_jedecnum = JEDEC_ST;
9416                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9417                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9418                                 break;
9419                         case FLASH_VENDOR_SAIFUN:
9420                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9421                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9422                                 break;
9423                         case FLASH_VENDOR_SST_SMALL:
9424                         case FLASH_VENDOR_SST_LARGE:
9425                                 tp->nvram_jedecnum = JEDEC_SST;
9426                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9427                                 break;
9428                 }
9429         }
9430         else {
9431                 tp->nvram_jedecnum = JEDEC_ATMEL;
9432                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9433                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9434         }
9435 }
9436
9437 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9438 {
9439         u32 nvcfg1;
9440
9441         nvcfg1 = tr32(NVRAM_CFG1);
9442
9443         /* NVRAM protection for TPM */
9444         if (nvcfg1 & (1 << 27))
9445                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9446
9447         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9448                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9449                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9450                         tp->nvram_jedecnum = JEDEC_ATMEL;
9451                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9452                         break;
9453                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9454                         tp->nvram_jedecnum = JEDEC_ATMEL;
9455                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9456                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9457                         break;
9458                 case FLASH_5752VENDOR_ST_M45PE10:
9459                 case FLASH_5752VENDOR_ST_M45PE20:
9460                 case FLASH_5752VENDOR_ST_M45PE40:
9461                         tp->nvram_jedecnum = JEDEC_ST;
9462                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9463                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9464                         break;
9465         }
9466
9467         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9468                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9469                         case FLASH_5752PAGE_SIZE_256:
9470                                 tp->nvram_pagesize = 256;
9471                                 break;
9472                         case FLASH_5752PAGE_SIZE_512:
9473                                 tp->nvram_pagesize = 512;
9474                                 break;
9475                         case FLASH_5752PAGE_SIZE_1K:
9476                                 tp->nvram_pagesize = 1024;
9477                                 break;
9478                         case FLASH_5752PAGE_SIZE_2K:
9479                                 tp->nvram_pagesize = 2048;
9480                                 break;
9481                         case FLASH_5752PAGE_SIZE_4K:
9482                                 tp->nvram_pagesize = 4096;
9483                                 break;
9484                         case FLASH_5752PAGE_SIZE_264:
9485                                 tp->nvram_pagesize = 264;
9486                                 break;
9487                 }
9488         }
9489         else {
9490                 /* For eeprom, set pagesize to maximum eeprom size */
9491                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9492
9493                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9494                 tw32(NVRAM_CFG1, nvcfg1);
9495         }
9496 }
9497
9498 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9499 {
9500         u32 nvcfg1, protect = 0;
9501
9502         nvcfg1 = tr32(NVRAM_CFG1);
9503
9504         /* NVRAM protection for TPM */
9505         if (nvcfg1 & (1 << 27)) {
9506                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9507                 protect = 1;
9508         }
9509
9510         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9511         switch (nvcfg1) {
9512                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9513                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9514                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9515                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9516                         tp->nvram_jedecnum = JEDEC_ATMEL;
9517                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9518                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9519                         tp->nvram_pagesize = 264;
9520                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9521                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9522                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9523                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9524                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9525                         else
9526                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9527                         break;
9528                 case FLASH_5752VENDOR_ST_M45PE10:
9529                 case FLASH_5752VENDOR_ST_M45PE20:
9530                 case FLASH_5752VENDOR_ST_M45PE40:
9531                         tp->nvram_jedecnum = JEDEC_ST;
9532                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9533                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9534                         tp->nvram_pagesize = 256;
9535                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9536                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9537                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9538                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9539                         else
9540                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9541                         break;
9542         }
9543 }
9544
9545 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9546 {
9547         u32 nvcfg1;
9548
9549         nvcfg1 = tr32(NVRAM_CFG1);
9550
9551         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9552                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9553                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9554                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9555                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9556                         tp->nvram_jedecnum = JEDEC_ATMEL;
9557                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9558                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9559
9560                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9561                         tw32(NVRAM_CFG1, nvcfg1);
9562                         break;
9563                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9564                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9565                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9566                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9567                         tp->nvram_jedecnum = JEDEC_ATMEL;
9568                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9569                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9570                         tp->nvram_pagesize = 264;
9571                         break;
9572                 case FLASH_5752VENDOR_ST_M45PE10:
9573                 case FLASH_5752VENDOR_ST_M45PE20:
9574                 case FLASH_5752VENDOR_ST_M45PE40:
9575                         tp->nvram_jedecnum = JEDEC_ST;
9576                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9577                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9578                         tp->nvram_pagesize = 256;
9579                         break;
9580         }
9581 }
9582
9583 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9584 {
9585         tp->nvram_jedecnum = JEDEC_ATMEL;
9586         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9587         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9588 }
9589
9590 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9591 static void __devinit tg3_nvram_init(struct tg3 *tp)
9592 {
9593         tw32_f(GRC_EEPROM_ADDR,
9594              (EEPROM_ADDR_FSM_RESET |
9595               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9596                EEPROM_ADDR_CLKPERD_SHIFT)));
9597
9598         msleep(1);
9599
9600         /* Enable seeprom accesses. */
9601         tw32_f(GRC_LOCAL_CTRL,
9602              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9603         udelay(100);
9604
9605         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9606             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9607                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9608
9609                 if (tg3_nvram_lock(tp)) {
9610                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9611                                "tg3_nvram_init failed.\n", tp->dev->name);
9612                         return;
9613                 }
9614                 tg3_enable_nvram_access(tp);
9615
9616                 tp->nvram_size = 0;
9617
9618                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9619                         tg3_get_5752_nvram_info(tp);
9620                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9621                         tg3_get_5755_nvram_info(tp);
9622                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9623                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9624                         tg3_get_5787_nvram_info(tp);
9625                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9626                         tg3_get_5906_nvram_info(tp);
9627                 else
9628                         tg3_get_nvram_info(tp);
9629
9630                 if (tp->nvram_size == 0)
9631                         tg3_get_nvram_size(tp);
9632
9633                 tg3_disable_nvram_access(tp);
9634                 tg3_nvram_unlock(tp);
9635
9636         } else {
9637                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9638
9639                 tg3_get_eeprom_size(tp);
9640         }
9641 }
9642
/* Read one 32-bit word from the legacy serial EEPROM through the GRC
 * EEPROM state machine.  @offset must be dword aligned and within
 * EEPROM_ADDR_ADDR_MASK.  Returns 0 and fills *val on success,
 * -EINVAL for a bad offset, or -EBUSY if the transaction does not
 * complete within ~1 second.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Clear the address/devid/read fields but keep the remaining
	 * bits of the register intact while programming the new read.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion: up to 1000 iterations of 1 ms. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9676
9677 #define NVRAM_CMD_TIMEOUT 10000
9678
9679 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9680 {
9681         int i;
9682
9683         tw32(NVRAM_CMD, nvram_cmd);
9684         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9685                 udelay(10);
9686                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9687                         udelay(10);
9688                         break;
9689                 }
9690         }
9691         if (i == NVRAM_CMD_TIMEOUT) {
9692                 return -EBUSY;
9693         }
9694         return 0;
9695 }
9696
9697 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9698 {
9699         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9700             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9701             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9702             (tp->nvram_jedecnum == JEDEC_ATMEL))
9703
9704                 addr = ((addr / tp->nvram_pagesize) <<
9705                         ATMEL_AT45DB0X1B_PAGE_POS) +
9706                        (addr % tp->nvram_pagesize);
9707
9708         return addr;
9709 }
9710
9711 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9712 {
9713         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9714             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9715             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9716             (tp->nvram_jedecnum == JEDEC_ATMEL))
9717
9718                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9719                         tp->nvram_pagesize) +
9720                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9721
9722         return addr;
9723 }
9724
/* Read one 32-bit word from NVRAM at @offset into *val.  Falls back
 * to the legacy EEPROM path when the NVRAM interface is not in use.
 * Takes and releases the NVRAM hardware lock around the access.
 * Returns 0 on success or a negative error code.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Atmel buffered flash wants a page/offset physical address. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Only publish the data word if the command completed. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9756
9757 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9758 {
9759         int err;
9760         u32 tmp;
9761
9762         err = tg3_nvram_read(tp, offset, &tmp);
9763         *val = swab32(tmp);
9764         return err;
9765 }
9766
/* Write @len bytes from @buf to the legacy serial EEPROM starting at
 * @offset, one 32-bit word per transaction.  Returns 0 on success or
 * -EBUSY if a word fails to complete within ~1 second.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Acknowledge any prior completion before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion: up to 1000 iterations of 1 ms. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9808
9809 /* offset and length are dword aligned */
9810 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9811                 u8 *buf)
9812 {
9813         int ret = 0;
9814         u32 pagesize = tp->nvram_pagesize;
9815         u32 pagemask = pagesize - 1;
9816         u32 nvram_cmd;
9817         u8 *tmp;
9818
9819         tmp = kmalloc(pagesize, GFP_KERNEL);
9820         if (tmp == NULL)
9821                 return -ENOMEM;
9822
9823         while (len) {
9824                 int j;
9825                 u32 phy_addr, page_off, size;
9826
9827                 phy_addr = offset & ~pagemask;
9828
9829                 for (j = 0; j < pagesize; j += 4) {
9830                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9831                                                 (u32 *) (tmp + j))))
9832                                 break;
9833                 }
9834                 if (ret)
9835                         break;
9836
9837                 page_off = offset & pagemask;
9838                 size = pagesize;
9839                 if (len < size)
9840                         size = len;
9841
9842                 len -= size;
9843
9844                 memcpy(tmp + page_off, buf, size);
9845
9846                 offset = offset + (pagesize - page_off);
9847
9848                 tg3_enable_nvram_access(tp);
9849
9850                 /*
9851                  * Before we can erase the flash page, we need
9852                  * to issue a special "write enable" command.
9853                  */
9854                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9855
9856                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9857                         break;
9858
9859                 /* Erase the target page */
9860                 tw32(NVRAM_ADDR, phy_addr);
9861
9862                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9863                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9864
9865                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9866                         break;
9867
9868                 /* Issue another write enable to start the write. */
9869                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9870
9871                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9872                         break;
9873
9874                 for (j = 0; j < pagesize; j += 4) {
9875                         u32 data;
9876
9877                         data = *((u32 *) (tmp + j));
9878                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9879
9880                         tw32(NVRAM_ADDR, phy_addr + j);
9881
9882                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9883                                 NVRAM_CMD_WR;
9884
9885                         if (j == 0)
9886                                 nvram_cmd |= NVRAM_CMD_FIRST;
9887                         else if (j == (pagesize - 4))
9888                                 nvram_cmd |= NVRAM_CMD_LAST;
9889
9890                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9891                                 break;
9892                 }
9893                 if (ret)
9894                         break;
9895         }
9896
9897         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9898         tg3_nvram_exec_cmd(tp, nvram_cmd);
9899
9900         kfree(tmp);
9901
9902         return ret;
9903 }
9904
/* offset and length are dword aligned.
 *
 * Write @len bytes from @buf to buffered flash or EEPROM, one dword
 * per NVRAM command, marking page and transfer boundaries with the
 * FIRST/LAST command flags.  Returns 0 on success or the first
 * failing command's error code.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Atmel buffered parts need a translated address. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at the start of a page or of the transfer;
		 * LAST at the end of a page or of the transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts (except on 5752/5755/5784/5787) require an
		 * explicit write-enable before each page's first word.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9956
9957 /* offset and length are dword aligned */
9958 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9959 {
9960         int ret;
9961
9962         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9963                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9964                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9965                 udelay(40);
9966         }
9967
9968         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9969                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9970         }
9971         else {
9972                 u32 grc_mode;
9973
9974                 ret = tg3_nvram_lock(tp);
9975                 if (ret)
9976                         return ret;
9977
9978                 tg3_enable_nvram_access(tp);
9979                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9980                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9981                         tw32(NVRAM_WRITE1, 0x406);
9982
9983                 grc_mode = tr32(GRC_MODE);
9984                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9985
9986                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9987                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9988
9989                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9990                                 buf);
9991                 }
9992                 else {
9993                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9994                                 buf);
9995                 }
9996
9997                 grc_mode = tr32(GRC_MODE);
9998                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9999
10000                 tg3_disable_nvram_access(tp);
10001                 tg3_nvram_unlock(tp);
10002         }
10003
10004         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10005                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10006                 udelay(40);
10007         }
10008
10009         return ret;
10010 }
10011
/* Maps a (PCI subsystem vendor, subsystem device) pair to the PHY ID
 * found on that board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

/* Known boards, keyed by PCI subsystem IDs.
 * NOTE(review): a phy_id of 0 appears to mark non-copper (fiber/serdes)
 * boards such as the 3C996SX -- confirm against lookup_by_subsys callers.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10054
10055 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10056 {
10057         int i;
10058
10059         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10060                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10061                      tp->pdev->subsystem_vendor) &&
10062                     (subsys_id_to_phy_id[i].subsys_devid ==
10063                      tp->pdev->subsystem_device))
10064                         return &subsys_id_to_phy_id[i];
10065         }
10066         return NULL;
10067 }
10068
10069 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10070 {
10071         u32 val;
10072         u16 pmcsr;
10073
10074         /* On some early chips the SRAM cannot be accessed in D3hot state,
10075          * so need make sure we're in D0.
10076          */
10077         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10078         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10079         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10080         msleep(1);
10081
10082         /* Make sure register accesses (indirect or otherwise)
10083          * will function correctly.
10084          */
10085         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10086                                tp->misc_host_ctrl);
10087
10088         /* The memory arbiter has to be enabled in order for SRAM accesses
10089          * to succeed.  Normally on powerup the tg3 chip firmware will make
10090          * sure it is enabled, but other entities such as system netboot
10091          * code might disable it.
10092          */
10093         val = tr32(MEMARB_MODE);
10094         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10095
10096         tp->phy_id = PHY_ID_INVALID;
10097         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10098
10099         /* Assume an onboard device and WOL capable by default.  */
10100         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10101
10102         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10103                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10104                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10105                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10106                 }
10107                 if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC)
10108                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10109                 return;
10110         }
10111
10112         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10113         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10114                 u32 nic_cfg, led_cfg;
10115                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10116                 int eeprom_phy_serdes = 0;
10117
10118                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10119                 tp->nic_sram_data_cfg = nic_cfg;
10120
10121                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10122                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10123                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10124                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10125                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10126                     (ver > 0) && (ver < 0x100))
10127                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10128
10129                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10130                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10131                         eeprom_phy_serdes = 1;
10132
10133                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10134                 if (nic_phy_id != 0) {
10135                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10136                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10137
10138                         eeprom_phy_id  = (id1 >> 16) << 10;
10139                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10140                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10141                 } else
10142                         eeprom_phy_id = 0;
10143
10144                 tp->phy_id = eeprom_phy_id;
10145                 if (eeprom_phy_serdes) {
10146                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10147                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10148                         else
10149                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10150                 }
10151
10152                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10153                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10154                                     SHASTA_EXT_LED_MODE_MASK);
10155                 else
10156                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10157
10158                 switch (led_cfg) {
10159                 default:
10160                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10161                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10162                         break;
10163
10164                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10165                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10166                         break;
10167
10168                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10169                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10170
10171                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10172                          * read on some older 5700/5701 bootcode.
10173                          */
10174                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10175                             ASIC_REV_5700 ||
10176                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10177                             ASIC_REV_5701)
10178                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10179
10180                         break;
10181
10182                 case SHASTA_EXT_LED_SHARED:
10183                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10184                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10185                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10186                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10187                                                  LED_CTRL_MODE_PHY_2);
10188                         break;
10189
10190                 case SHASTA_EXT_LED_MAC:
10191                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10192                         break;
10193
10194                 case SHASTA_EXT_LED_COMBO:
10195                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10196                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10197                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10198                                                  LED_CTRL_MODE_PHY_2);
10199                         break;
10200
10201                 };
10202
10203                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10204                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10205                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10206                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10207
10208                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10209                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10210                         if ((tp->pdev->subsystem_vendor ==
10211                              PCI_VENDOR_ID_ARIMA) &&
10212                             (tp->pdev->subsystem_device == 0x205a ||
10213                              tp->pdev->subsystem_device == 0x2063))
10214                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10215                 } else {
10216                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10217                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10218                 }
10219
10220                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10221                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10222                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10223                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10224                 }
10225                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10226                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10227                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10228
10229                 if (cfg2 & (1 << 17))
10230                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10231
10232                 /* serdes signal pre-emphasis in register 0x590 set by */
10233                 /* bootcode if bit 18 is set */
10234                 if (cfg2 & (1 << 18))
10235                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10236
10237                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10238                         u32 cfg3;
10239
10240                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10241                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10242                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10243                 }
10244         }
10245 }
10246
/* Probe and identify the PHY attached to this NIC, then (for copper
 * PHYs) make sure autonegotiation advertises everything the chip
 * supports.  Also derives the initial link_config.advertising mask
 * for SERDES and 10/100-only parts.
 *
 * Returns 0 on success, -ENODEV if no PHY ID could be determined,
 * or the error from a failed PHY reset / DSP init.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.  When ASF is enabled
	 * we therefore skip the hardware read entirely and rely on
	 * the eeprom/subsystem-table fallbacks below.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Assemble the driver's internal PHY ID encoding from the
		 * two MII PHYSID registers.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* The BCM8002 is a fiber part; everything else read this
		 * way is treated as copper.
		 */
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero table entry means "serdes, ID unknown". */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper-only, non-ASF path: reset the PHY if the link is down
	 * and reprogram the advertisement registers.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR link status is latched-low; read it twice so the
		 * second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		/* Advertise all 10/100 modes plus pause. */
		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 steppings must negotiate as master
			 * at gigabit speeds.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Only rewrite the advertisement and restart autoneg if
		 * the PHY is not already advertising the full mask.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): advertisement registers are rewritten
		 * unconditionally here even when the block above already
		 * wrote them — presumably to reassert them after
		 * tg3_phy_set_wirespeed(); confirm against vendor code
		 * before removing.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): the 5401 DSP init is deliberately run a second
	 * time when the first pass succeeded; do not fold these two
	 * calls together without confirming against the vendor driver.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* SERDES parts only do gigabit over fibre. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	/* 10/100-only parts must not advertise gigabit modes. */
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10372
/* Extract the board part number from the Vital Product Data area and
 * store it in tp->board_part_number.  The VPD image is fetched either
 * straight from NVRAM (when the tg3 EEPROM magic is present) or via
 * the PCI VPD capability registers.  On any failure a chip-specific
 * default string is used instead.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100; copy 256 bytes,
		 * unpacking each 32-bit word LSB first.
		 */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* NOTE(review): a zero return (capability absent) is not
		 * checked here; the config writes below would then target
		 * offset 0 + PCI_VPD_ADDR — verify this path is only taken
		 * on VPD-capable devices.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			/* Write the address, then poll for the completion
			 * flag (bit 15 of PCI_VPD_ADDR) for up to ~100ms.
			 */
			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			/* Store the word in little-endian byte order so the
			 * parser below sees the same layout as the NVRAM path.
			 */
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip identifier-string (0x82) and read-write (0x91)
		 * resources; bytes i+1/i+2 hold the little-endian length.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only a read-only VPD resource (0x90) can hold "PN". */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword entries (2-byte key + 1-byte length)
		 * looking for the "PN" (part number) keyword.
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				/* NOTE(review): no NUL terminator is
				 * appended; if board_part_number is exactly
				 * 24 bytes a 24-byte part number leaves it
				 * unterminated — confirm the field size in
				 * tg3.h.
				 */
				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	/* Fall back to a chip-derived default name. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10472
10473 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10474 {
10475         u32 val, offset, start;
10476
10477         if (tg3_nvram_read_swab(tp, 0, &val))
10478                 return;
10479
10480         if (val != TG3_EEPROM_MAGIC)
10481                 return;
10482
10483         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10484             tg3_nvram_read_swab(tp, 0x4, &start))
10485                 return;
10486
10487         offset = tg3_nvram_logical_addr(tp, offset);
10488         if (tg3_nvram_read_swab(tp, offset, &val))
10489                 return;
10490
10491         if ((val & 0xfc000000) == 0x0c000000) {
10492                 u32 ver_offset, addr;
10493                 int i;
10494
10495                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10496                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10497                         return;
10498
10499                 if (val != 0)
10500                         return;
10501
10502                 addr = offset + ver_offset - start;
10503                 for (i = 0; i < 16; i += 4) {
10504                         if (tg3_nvram_read(tp, addr + i, &val))
10505                                 return;
10506
10507                         val = cpu_to_le32(val);
10508                         memcpy(tp->fw_ver + i, &val, 4);
10509                 }
10510         }
10511 }
10512
10513 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10514
10515 static int __devinit tg3_get_invariants(struct tg3 *tp)
10516 {
10517         static struct pci_device_id write_reorder_chipsets[] = {
10518                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10519                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10520                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10521                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10522                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10523                              PCI_DEVICE_ID_VIA_8385_0) },
10524                 { },
10525         };
10526         u32 misc_ctrl_reg;
10527         u32 cacheline_sz_reg;
10528         u32 pci_state_reg, grc_misc_cfg;
10529         u32 val;
10530         u16 pci_cmd;
10531         int err, pcie_cap;
10532
10533         /* Force memory write invalidate off.  If we leave it on,
10534          * then on 5700_BX chips we have to enable a workaround.
10535          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10536          * to match the cacheline size.  The Broadcom driver have this
10537          * workaround but turns MWI off all the times so never uses
10538          * it.  This seems to suggest that the workaround is insufficient.
10539          */
10540         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10541         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10542         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10543
10544         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10545          * has the register indirect write enable bit set before
10546          * we try to access any of the MMIO registers.  It is also
10547          * critical that the PCI-X hw workaround situation is decided
10548          * before that as well.
10549          */
10550         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10551                               &misc_ctrl_reg);
10552
10553         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10554                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10555         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10556                 u32 prod_id_asic_rev;
10557
10558                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10559                                       &prod_id_asic_rev);
10560                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10561         }
10562
10563         /* Wrong chip ID in 5752 A0. This code can be removed later
10564          * as A0 is not in production.
10565          */
10566         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10567                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10568
10569         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10570          * we need to disable memory and use config. cycles
10571          * only to access all registers. The 5702/03 chips
10572          * can mistakenly decode the special cycles from the
10573          * ICH chipsets as memory write cycles, causing corruption
10574          * of register and memory space. Only certain ICH bridges
10575          * will drive special cycles with non-zero data during the
10576          * address phase which can fall within the 5703's address
10577          * range. This is not an ICH bug as the PCI spec allows
10578          * non-zero address during special cycles. However, only
10579          * these ICH bridges are known to drive non-zero addresses
10580          * during special cycles.
10581          *
10582          * Since special cycles do not cross PCI bridges, we only
10583          * enable this workaround if the 5703 is on the secondary
10584          * bus of these ICH bridges.
10585          */
10586         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10587             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10588                 static struct tg3_dev_id {
10589                         u32     vendor;
10590                         u32     device;
10591                         u32     rev;
10592                 } ich_chipsets[] = {
10593                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10594                           PCI_ANY_ID },
10595                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10596                           PCI_ANY_ID },
10597                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10598                           0xa },
10599                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10600                           PCI_ANY_ID },
10601                         { },
10602                 };
10603                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10604                 struct pci_dev *bridge = NULL;
10605
10606                 while (pci_id->vendor != 0) {
10607                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10608                                                 bridge);
10609                         if (!bridge) {
10610                                 pci_id++;
10611                                 continue;
10612                         }
10613                         if (pci_id->rev != PCI_ANY_ID) {
10614                                 if (bridge->revision > pci_id->rev)
10615                                         continue;
10616                         }
10617                         if (bridge->subordinate &&
10618                             (bridge->subordinate->number ==
10619                              tp->pdev->bus->number)) {
10620
10621                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10622                                 pci_dev_put(bridge);
10623                                 break;
10624                         }
10625                 }
10626         }
10627
10628         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10629          * DMA addresses > 40-bit. This bridge may have other additional
10630          * 57xx devices behind it in some 4-port NIC designs for example.
10631          * Any tg3 device found behind the bridge will also need the 40-bit
10632          * DMA workaround.
10633          */
10634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10636                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10637                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10638                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10639         }
10640         else {
10641                 struct pci_dev *bridge = NULL;
10642
10643                 do {
10644                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10645                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10646                                                 bridge);
10647                         if (bridge && bridge->subordinate &&
10648                             (bridge->subordinate->number <=
10649                              tp->pdev->bus->number) &&
10650                             (bridge->subordinate->subordinate >=
10651                              tp->pdev->bus->number)) {
10652                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10653                                 pci_dev_put(bridge);
10654                                 break;
10655                         }
10656                 } while (bridge);
10657         }
10658
10659         /* Initialize misc host control in PCI block. */
10660         tp->misc_host_ctrl |= (misc_ctrl_reg &
10661                                MISC_HOST_CTRL_CHIPREV);
10662         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10663                                tp->misc_host_ctrl);
10664
10665         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10666                               &cacheline_sz_reg);
10667
10668         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10669         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10670         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10671         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10672
10673         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10674             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10675                 tp->pdev_peer = tg3_find_peer(tp);
10676
10677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10678             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10682             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10683             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10684                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10685
10686         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10687             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10688                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10689
10690         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10691                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
10692                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
10693                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
10694                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
10695                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
10696                      tp->pdev_peer == tp->pdev))
10697                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
10698
10699                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10700                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10701                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10702                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10703                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10704                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10705                 } else {
10706                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
10707                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10708                                 ASIC_REV_5750 &&
10709                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10710                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
10711                 }
10712         }
10713
10714         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10715             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10716             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10717             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10718             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10719             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
10720             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10721                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10722
10723         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10724         if (pcie_cap != 0) {
10725                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10726                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10727                         u16 lnkctl;
10728
10729                         pci_read_config_word(tp->pdev,
10730                                              pcie_cap + PCI_EXP_LNKCTL,
10731                                              &lnkctl);
10732                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10733                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10734                 }
10735         }
10736
10737         /* If we have an AMD 762 or VIA K8T800 chipset, write
10738          * reordering to the mailbox registers done by the host
10739          * controller can cause major troubles.  We read back from
10740          * every mailbox register write to force the writes to be
10741          * posted to the chip in order.
10742          */
10743         if (pci_dev_present(write_reorder_chipsets) &&
10744             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10745                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10746
10747         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10748             tp->pci_lat_timer < 64) {
10749                 tp->pci_lat_timer = 64;
10750
10751                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10752                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10753                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10754                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10755
10756                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10757                                        cacheline_sz_reg);
10758         }
10759
10760         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
10761             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10762                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
10763                 if (!tp->pcix_cap) {
10764                         printk(KERN_ERR PFX "Cannot find PCI-X "
10765                                             "capability, aborting.\n");
10766                         return -EIO;
10767                 }
10768         }
10769
10770         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10771                               &pci_state_reg);
10772
10773         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10774                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10775
10776                 /* If this is a 5700 BX chipset, and we are in PCI-X
10777                  * mode, enable register write workaround.
10778                  *
10779                  * The workaround is to use indirect register accesses
10780                  * for all chip writes not to mailbox registers.
10781                  */
10782                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10783                         u32 pm_reg;
10784
10785                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10786
10787                         /* The chip can have it's power management PCI config
10788                          * space registers clobbered due to this bug.
10789                          * So explicitly force the chip into D0 here.
10790                          */
10791                         pci_read_config_dword(tp->pdev,
10792                                               tp->pm_cap + PCI_PM_CTRL,
10793                                               &pm_reg);
10794                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10795                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10796                         pci_write_config_dword(tp->pdev,
10797                                                tp->pm_cap + PCI_PM_CTRL,
10798                                                pm_reg);
10799
10800                         /* Also, force SERR#/PERR# in PCI command. */
10801                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10802                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10803                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10804                 }
10805         }
10806
10807         /* 5700 BX chips need to have their TX producer index mailboxes
10808          * written twice to workaround a bug.
10809          */
10810         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10811                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10812
10813         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10814                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10815         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10816                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10817
10818         /* Chip-specific fixup from Broadcom driver */
10819         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10820             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10821                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10822                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10823         }
10824
10825         /* Default fast path register access methods */
10826         tp->read32 = tg3_read32;
10827         tp->write32 = tg3_write32;
10828         tp->read32_mbox = tg3_read32;
10829         tp->write32_mbox = tg3_write32;
10830         tp->write32_tx_mbox = tg3_write32;
10831         tp->write32_rx_mbox = tg3_write32;
10832
10833         /* Various workaround register access methods */
10834         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10835                 tp->write32 = tg3_write_indirect_reg32;
10836         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10837                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10838                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
10839                 /*
10840                  * Back to back register writes can cause problems on these
10841                  * chips, the workaround is to read back all reg writes
10842                  * except those to mailbox regs.
10843                  *
10844                  * See tg3_write_indirect_reg32().
10845                  */
10846                 tp->write32 = tg3_write_flush_reg32;
10847         }
10848
10849
10850         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10851             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10852                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10853                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10854                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10855         }
10856
10857         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10858                 tp->read32 = tg3_read_indirect_reg32;
10859                 tp->write32 = tg3_write_indirect_reg32;
10860                 tp->read32_mbox = tg3_read_indirect_mbox;
10861                 tp->write32_mbox = tg3_write_indirect_mbox;
10862                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10863                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10864
10865                 iounmap(tp->regs);
10866                 tp->regs = NULL;
10867
10868                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10869                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10870                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10871         }
10872         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10873                 tp->read32_mbox = tg3_read32_mbox_5906;
10874                 tp->write32_mbox = tg3_write32_mbox_5906;
10875                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
10876                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
10877         }
10878
10879         if (tp->write32 == tg3_write_indirect_reg32 ||
10880             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10881              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10882               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10883                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10884
10885         /* Get eeprom hw config before calling tg3_set_power_state().
10886          * In particular, the TG3_FLG2_IS_NIC flag must be
10887          * determined before calling tg3_set_power_state() so that
10888          * we know whether or not to switch out of Vaux power.
10889          * When the flag is set, it means that GPIO1 is used for eeprom
10890          * write protect and also implies that it is a LOM where GPIOs
10891          * are not used to switch power.
10892          */
10893         tg3_get_eeprom_hw_cfg(tp);
10894
10895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10896                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
10897
10898         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10899          * GPIO1 driven high will bring 5700's external PHY out of reset.
10900          * It is also used as eeprom write protect on LOMs.
10901          */
10902         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10903         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10904             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10905                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10906                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10907         /* Unused GPIO3 must be driven as output on 5752 because there
10908          * are no pull-up resistors on unused GPIO pins.
10909          */
10910         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10911                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10912
10913         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10914                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10915
10916         /* Force the chip into D0. */
10917         err = tg3_set_power_state(tp, PCI_D0);
10918         if (err) {
10919                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10920                        pci_name(tp->pdev));
10921                 return err;
10922         }
10923
10924         /* 5700 B0 chips do not support checksumming correctly due
10925          * to hardware bugs.
10926          */
10927         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10928                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10929
10930         /* Derive initial jumbo mode from MTU assigned in
10931          * ether_setup() via the alloc_etherdev() call
10932          */
10933         if (tp->dev->mtu > ETH_DATA_LEN &&
10934             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10935                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10936
10937         /* Determine WakeOnLan speed to use. */
10938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10939             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10940             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10941             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10942                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10943         } else {
10944                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10945         }
10946
10947         /* A few boards don't want Ethernet@WireSpeed phy feature */
10948         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10949             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10950              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10951              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10952             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10953             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10954                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10955
10956         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10957             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10958                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10959         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10960                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10961
10962         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10963                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10964                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10965                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) {
10966                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
10967                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
10968                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10969                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10970                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10971                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10972                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10973         }
10974
10975         tp->coalesce_mode = 0;
10976         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10977             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10978                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10979
10980         /* Initialize MAC MI mode, polling disabled. */
10981         tw32_f(MAC_MI_MODE, tp->mi_mode);
10982         udelay(80);
10983
10984         /* Initialize data/descriptor byte/word swapping. */
10985         val = tr32(GRC_MODE);
10986         val &= GRC_MODE_HOST_STACKUP;
10987         tw32(GRC_MODE, val | tp->grc_mode);
10988
10989         tg3_switch_clocks(tp);
10990
10991         /* Clear this out for sanity. */
10992         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10993
10994         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10995                               &pci_state_reg);
10996         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10997             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10998                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10999
11000                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11001                     chiprevid == CHIPREV_ID_5701_B0 ||
11002                     chiprevid == CHIPREV_ID_5701_B2 ||
11003                     chiprevid == CHIPREV_ID_5701_B5) {
11004                         void __iomem *sram_base;
11005
11006                         /* Write some dummy words into the SRAM status block
11007                          * area, see if it reads back correctly.  If the return
11008                          * value is bad, force enable the PCIX workaround.
11009                          */
11010                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11011
11012                         writel(0x00000000, sram_base);
11013                         writel(0x00000000, sram_base + 4);
11014                         writel(0xffffffff, sram_base + 4);
11015                         if (readl(sram_base) != 0x00000000)
11016                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11017                 }
11018         }
11019
11020         udelay(50);
11021         tg3_nvram_init(tp);
11022
11023         grc_misc_cfg = tr32(GRC_MISC_CFG);
11024         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11025
11026         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11027             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11028              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11029                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11030
11031         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11032             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11033                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11034         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11035                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11036                                       HOSTCC_MODE_CLRTICK_TXBD);
11037
11038                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11039                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11040                                        tp->misc_host_ctrl);
11041         }
11042
11043         /* these are limited to 10/100 only */
11044         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11045              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11046             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11047              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11048              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11049               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11050               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11051             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11052              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11053               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11054               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11055             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11056                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11057
11058         err = tg3_phy_probe(tp);
11059         if (err) {
11060                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11061                        pci_name(tp->pdev), err);
11062                 /* ... but do not return immediately ... */
11063         }
11064
11065         tg3_read_partno(tp);
11066         tg3_read_fw_ver(tp);
11067
11068         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11069                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11070         } else {
11071                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11072                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11073                 else
11074                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11075         }
11076
11077         /* 5700 {AX,BX} chips have a broken status block link
11078          * change bit implementation, so we must use the
11079          * status register in those cases.
11080          */
11081         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11082                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11083         else
11084                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11085
11086         /* The led_ctrl is set during tg3_phy_probe, here we might
11087          * have to force the link status polling mechanism based
11088          * upon subsystem IDs.
11089          */
11090         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11091             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11092             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11093                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11094                                   TG3_FLAG_USE_LINKCHG_REG);
11095         }
11096
11097         /* For all SERDES we poll the MAC status register. */
11098         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11099                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11100         else
11101                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11102
11103         /* All chips before 5787 can get confused if TX buffers
11104          * straddle the 4GB address boundary in some cases.
11105          */
11106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11107             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11108             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11110                 tp->dev->hard_start_xmit = tg3_start_xmit;
11111         else
11112                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11113
11114         tp->rx_offset = 2;
11115         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11116             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11117                 tp->rx_offset = 0;
11118
11119         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11120
11121         /* Increment the rx prod index on the rx std ring by at most
11122          * 8 for these chips to workaround hw errata.
11123          */
11124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11125             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11126             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11127                 tp->rx_std_max_post = 8;
11128
11129         /* By default, disable wake-on-lan.  User can change this
11130          * using ETHTOOL_SWOL.
11131          */
11132         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
11133
11134         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11135                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11136                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11137
11138         return err;
11139 }
11140
11141 #ifdef CONFIG_SPARC
11142 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11143 {
11144         struct net_device *dev = tp->dev;
11145         struct pci_dev *pdev = tp->pdev;
11146         struct device_node *dp = pci_device_to_OF_node(pdev);
11147         const unsigned char *addr;
11148         int len;
11149
11150         addr = of_get_property(dp, "local-mac-address", &len);
11151         if (addr && len == 6) {
11152                 memcpy(dev->dev_addr, addr, 6);
11153                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11154                 return 0;
11155         }
11156         return -ENODEV;
11157 }
11158
11159 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11160 {
11161         struct net_device *dev = tp->dev;
11162
11163         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11164         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11165         return 0;
11166 }
11167 #endif
11168
11169 static int __devinit tg3_get_device_address(struct tg3 *tp)
11170 {
11171         struct net_device *dev = tp->dev;
11172         u32 hi, lo, mac_offset;
11173         int addr_ok = 0;
11174
11175 #ifdef CONFIG_SPARC
11176         if (!tg3_get_macaddr_sparc(tp))
11177                 return 0;
11178 #endif
11179
11180         mac_offset = 0x7c;
11181         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11182             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11183                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11184                         mac_offset = 0xcc;
11185                 if (tg3_nvram_lock(tp))
11186                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11187                 else
11188                         tg3_nvram_unlock(tp);
11189         }
11190         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11191                 mac_offset = 0x10;
11192
11193         /* First try to get it from MAC address mailbox. */
11194         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11195         if ((hi >> 16) == 0x484b) {
11196                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11197                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11198
11199                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11200                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11201                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11202                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11203                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11204
11205                 /* Some old bootcode may report a 0 MAC address in SRAM */
11206                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11207         }
11208         if (!addr_ok) {
11209                 /* Next, try NVRAM. */
11210                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11211                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11212                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11213                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11214                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11215                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11216                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11217                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11218                 }
11219                 /* Finally just fetch it out of the MAC control regs. */
11220                 else {
11221                         hi = tr32(MAC_ADDR_0_HIGH);
11222                         lo = tr32(MAC_ADDR_0_LOW);
11223
11224                         dev->dev_addr[5] = lo & 0xff;
11225                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11226                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11227                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11228                         dev->dev_addr[1] = hi & 0xff;
11229                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11230                 }
11231         }
11232
11233         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11234 #ifdef CONFIG_SPARC64
11235                 if (!tg3_get_default_macaddr_sparc(tp))
11236                         return 0;
11237 #endif
11238                 return -EINVAL;
11239         }
11240         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11241         return 0;
11242 }
11243
11244 #define BOUNDARY_SINGLE_CACHELINE       1
11245 #define BOUNDARY_MULTI_CACHELINE        2
11246
11247 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11248 {
11249         int cacheline_size;
11250         u8 byte;
11251         int goal;
11252
11253         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11254         if (byte == 0)
11255                 cacheline_size = 1024;
11256         else
11257                 cacheline_size = (int) byte * 4;
11258
11259         /* On 5703 and later chips, the boundary bits have no
11260          * effect.
11261          */
11262         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11263             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11264             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11265                 goto out;
11266
11267 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11268         goal = BOUNDARY_MULTI_CACHELINE;
11269 #else
11270 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11271         goal = BOUNDARY_SINGLE_CACHELINE;
11272 #else
11273         goal = 0;
11274 #endif
11275 #endif
11276
11277         if (!goal)
11278                 goto out;
11279
11280         /* PCI controllers on most RISC systems tend to disconnect
11281          * when a device tries to burst across a cache-line boundary.
11282          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11283          *
11284          * Unfortunately, for PCI-E there are only limited
11285          * write-side controls for this, and thus for reads
11286          * we will still get the disconnects.  We'll also waste
11287          * these PCI cycles for both read and write for chips
11288          * other than 5700 and 5701 which do not implement the
11289          * boundary bits.
11290          */
11291         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11292             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11293                 switch (cacheline_size) {
11294                 case 16:
11295                 case 32:
11296                 case 64:
11297                 case 128:
11298                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11299                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11300                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11301                         } else {
11302                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11303                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11304                         }
11305                         break;
11306
11307                 case 256:
11308                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11309                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11310                         break;
11311
11312                 default:
11313                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11314                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11315                         break;
11316                 };
11317         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11318                 switch (cacheline_size) {
11319                 case 16:
11320                 case 32:
11321                 case 64:
11322                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11323                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11324                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11325                                 break;
11326                         }
11327                         /* fallthrough */
11328                 case 128:
11329                 default:
11330                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11331                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11332                         break;
11333                 };
11334         } else {
11335                 switch (cacheline_size) {
11336                 case 16:
11337                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11338                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11339                                         DMA_RWCTRL_WRITE_BNDRY_16);
11340                                 break;
11341                         }
11342                         /* fallthrough */
11343                 case 32:
11344                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11345                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11346                                         DMA_RWCTRL_WRITE_BNDRY_32);
11347                                 break;
11348                         }
11349                         /* fallthrough */
11350                 case 64:
11351                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11352                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11353                                         DMA_RWCTRL_WRITE_BNDRY_64);
11354                                 break;
11355                         }
11356                         /* fallthrough */
11357                 case 128:
11358                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11359                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11360                                         DMA_RWCTRL_WRITE_BNDRY_128);
11361                                 break;
11362                         }
11363                         /* fallthrough */
11364                 case 256:
11365                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11366                                 DMA_RWCTRL_WRITE_BNDRY_256);
11367                         break;
11368                 case 512:
11369                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11370                                 DMA_RWCTRL_WRITE_BNDRY_512);
11371                         break;
11372                 case 1024:
11373                 default:
11374                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11375                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11376                         break;
11377                 };
11378         }
11379
11380 out:
11381         return val;
11382 }
11383
/* Run one host<->NIC DMA transfer of @size bytes through the chip's
 * internal DMA engine and poll for its completion.
 *
 * @buf / @buf_dma: CPU and bus addresses of the host-side test buffer.
 * @to_device: nonzero = host-to-card read DMA (RDMAC path),
 *             zero     = card-to-host write DMA (WDMAC path).
 *
 * Returns 0 when the completion FIFO reports the descriptor finished,
 * -ENODEV on timeout (~4 ms of polling).
 *
 * NOTE(review): the register write sequence below (FTQ/ BUFMGR resets,
 * descriptor staging via the PCI memory window, then the FIFO kick)
 * is order-sensitive hardware bring-up; do not reorder.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	/* Stage the test descriptor in the NIC's on-chip DMA descriptor pool. */
	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines and completion FIFOs before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* 64-bit bus address of the host buffer, split across two 32-bit
	 * descriptor fields; 0x2100 is the NIC-internal mbuf address used
	 * as the on-chip end of the transfer.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Completion queue 13 / send queue 2 route the read-DMA
		 * completion; enable the read DMA engine.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Completion queue 16 / send queue 7 for write DMA. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window (config-space indirect access).
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	/* Close the memory window again. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the appropriate DMA FIFO with the descriptor's SRAM address
	 * to start the transfer.
	 */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO: done when it echoes back the descriptor
	 * address.  40 iterations x 100us = ~4 ms timeout.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11464
11465 #define TEST_BUFFER_SIZE        0x2000
11466
11467 static int __devinit tg3_test_dma(struct tg3 *tp)
11468 {
11469         dma_addr_t buf_dma;
11470         u32 *buf, saved_dma_rwctrl;
11471         int ret;
11472
11473         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11474         if (!buf) {
11475                 ret = -ENOMEM;
11476                 goto out_nofree;
11477         }
11478
11479         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11480                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11481
11482         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11483
11484         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11485                 /* DMA read watermark not used on PCIE */
11486                 tp->dma_rwctrl |= 0x00180000;
11487         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11488                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11489                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11490                         tp->dma_rwctrl |= 0x003f0000;
11491                 else
11492                         tp->dma_rwctrl |= 0x003f000f;
11493         } else {
11494                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11495                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11496                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11497                         u32 read_water = 0x7;
11498
11499                         /* If the 5704 is behind the EPB bridge, we can
11500                          * do the less restrictive ONE_DMA workaround for
11501                          * better performance.
11502                          */
11503                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11504                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11505                                 tp->dma_rwctrl |= 0x8000;
11506                         else if (ccval == 0x6 || ccval == 0x7)
11507                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11508
11509                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
11510                                 read_water = 4;
11511                         /* Set bit 23 to enable PCIX hw bug fix */
11512                         tp->dma_rwctrl |=
11513                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
11514                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
11515                                 (1 << 23);
11516                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11517                         /* 5780 always in PCIX mode */
11518                         tp->dma_rwctrl |= 0x00144000;
11519                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11520                         /* 5714 always in PCIX mode */
11521                         tp->dma_rwctrl |= 0x00148000;
11522                 } else {
11523                         tp->dma_rwctrl |= 0x001b000f;
11524                 }
11525         }
11526
11527         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11528             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11529                 tp->dma_rwctrl &= 0xfffffff0;
11530
11531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11533                 /* Remove this if it causes problems for some boards. */
11534                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11535
11536                 /* On 5700/5701 chips, we need to set this bit.
11537                  * Otherwise the chip will issue cacheline transactions
11538                  * to streamable DMA memory with not all the byte
11539                  * enables turned on.  This is an error on several
11540                  * RISC PCI controllers, in particular sparc64.
11541                  *
11542                  * On 5703/5704 chips, this bit has been reassigned
11543                  * a different meaning.  In particular, it is used
11544                  * on those chips to enable a PCI-X workaround.
11545                  */
11546                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11547         }
11548
11549         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11550
11551 #if 0
11552         /* Unneeded, already done by tg3_get_invariants.  */
11553         tg3_switch_clocks(tp);
11554 #endif
11555
11556         ret = 0;
11557         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11558             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11559                 goto out;
11560
11561         /* It is best to perform DMA test with maximum write burst size
11562          * to expose the 5700/5701 write DMA bug.
11563          */
11564         saved_dma_rwctrl = tp->dma_rwctrl;
11565         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11566         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11567
11568         while (1) {
11569                 u32 *p = buf, i;
11570
11571                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11572                         p[i] = i;
11573
11574                 /* Send the buffer to the chip. */
11575                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11576                 if (ret) {
11577                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11578                         break;
11579                 }
11580
11581 #if 0
11582                 /* validate data reached card RAM correctly. */
11583                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11584                         u32 val;
11585                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11586                         if (le32_to_cpu(val) != p[i]) {
11587                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11588                                 /* ret = -ENODEV here? */
11589                         }
11590                         p[i] = 0;
11591                 }
11592 #endif
11593                 /* Now read it back. */
11594                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11595                 if (ret) {
11596                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11597
11598                         break;
11599                 }
11600
11601                 /* Verify it. */
11602                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11603                         if (p[i] == i)
11604                                 continue;
11605
11606                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11607                             DMA_RWCTRL_WRITE_BNDRY_16) {
11608                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11609                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11610                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11611                                 break;
11612                         } else {
11613                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11614                                 ret = -ENODEV;
11615                                 goto out;
11616                         }
11617                 }
11618
11619                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11620                         /* Success. */
11621                         ret = 0;
11622                         break;
11623                 }
11624         }
11625         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11626             DMA_RWCTRL_WRITE_BNDRY_16) {
11627                 static struct pci_device_id dma_wait_state_chipsets[] = {
11628                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11629                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11630                         { },
11631                 };
11632
11633                 /* DMA test passed without adjusting DMA boundary,
11634                  * now look for chipsets that are known to expose the
11635                  * DMA bug without failing the test.
11636                  */
11637                 if (pci_dev_present(dma_wait_state_chipsets)) {
11638                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11639                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11640                 }
11641                 else
11642                         /* Safe to use the calculated DMA boundary. */
11643                         tp->dma_rwctrl = saved_dma_rwctrl;
11644
11645                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11646         }
11647
11648 out:
11649         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11650 out_nofree:
11651         return ret;
11652 }
11653
11654 static void __devinit tg3_init_link_config(struct tg3 *tp)
11655 {
11656         tp->link_config.advertising =
11657                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11658                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11659                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11660                  ADVERTISED_Autoneg | ADVERTISED_MII);
11661         tp->link_config.speed = SPEED_INVALID;
11662         tp->link_config.duplex = DUPLEX_INVALID;
11663         tp->link_config.autoneg = AUTONEG_ENABLE;
11664         tp->link_config.active_speed = SPEED_INVALID;
11665         tp->link_config.active_duplex = DUPLEX_INVALID;
11666         tp->link_config.phy_is_low_power = 0;
11667         tp->link_config.orig_speed = SPEED_INVALID;
11668         tp->link_config.orig_duplex = DUPLEX_INVALID;
11669         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11670 }
11671
11672 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11673 {
11674         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11675                 tp->bufmgr_config.mbuf_read_dma_low_water =
11676                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11677                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11678                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11679                 tp->bufmgr_config.mbuf_high_water =
11680                         DEFAULT_MB_HIGH_WATER_5705;
11681                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11682                         tp->bufmgr_config.mbuf_mac_rx_low_water =
11683                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
11684                         tp->bufmgr_config.mbuf_high_water =
11685                                 DEFAULT_MB_HIGH_WATER_5906;
11686                 }
11687
11688                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11689                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11690                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11691                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11692                 tp->bufmgr_config.mbuf_high_water_jumbo =
11693                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11694         } else {
11695                 tp->bufmgr_config.mbuf_read_dma_low_water =
11696                         DEFAULT_MB_RDMA_LOW_WATER;
11697                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11698                         DEFAULT_MB_MACRX_LOW_WATER;
11699                 tp->bufmgr_config.mbuf_high_water =
11700                         DEFAULT_MB_HIGH_WATER;
11701
11702                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11703                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11704                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11705                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11706                 tp->bufmgr_config.mbuf_high_water_jumbo =
11707                         DEFAULT_MB_HIGH_WATER_JUMBO;
11708         }
11709
11710         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11711         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11712 }
11713
11714 static char * __devinit tg3_phy_string(struct tg3 *tp)
11715 {
11716         switch (tp->phy_id & PHY_ID_MASK) {
11717         case PHY_ID_BCM5400:    return "5400";
11718         case PHY_ID_BCM5401:    return "5401";
11719         case PHY_ID_BCM5411:    return "5411";
11720         case PHY_ID_BCM5701:    return "5701";
11721         case PHY_ID_BCM5703:    return "5703";
11722         case PHY_ID_BCM5704:    return "5704";
11723         case PHY_ID_BCM5705:    return "5705";
11724         case PHY_ID_BCM5750:    return "5750";
11725         case PHY_ID_BCM5752:    return "5752";
11726         case PHY_ID_BCM5714:    return "5714";
11727         case PHY_ID_BCM5780:    return "5780";
11728         case PHY_ID_BCM5755:    return "5755";
11729         case PHY_ID_BCM5787:    return "5787";
11730         case PHY_ID_BCM5784:    return "5784";
11731         case PHY_ID_BCM5756:    return "5722/5756";
11732         case PHY_ID_BCM5906:    return "5906";
11733         case PHY_ID_BCM8002:    return "8002/serdes";
11734         case 0:                 return "serdes";
11735         default:                return "unknown";
11736         };
11737 }
11738
11739 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11740 {
11741         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11742                 strcpy(str, "PCI Express");
11743                 return str;
11744         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11745                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11746
11747                 strcpy(str, "PCIX:");
11748
11749                 if ((clock_ctrl == 7) ||
11750                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11751                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11752                         strcat(str, "133MHz");
11753                 else if (clock_ctrl == 0)
11754                         strcat(str, "33MHz");
11755                 else if (clock_ctrl == 2)
11756                         strcat(str, "50MHz");
11757                 else if (clock_ctrl == 4)
11758                         strcat(str, "66MHz");
11759                 else if (clock_ctrl == 6)
11760                         strcat(str, "100MHz");
11761         } else {
11762                 strcpy(str, "PCI:");
11763                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11764                         strcat(str, "66MHz");
11765                 else
11766                         strcat(str, "33MHz");
11767         }
11768         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11769                 strcat(str, ":32-bit");
11770         else
11771                 strcat(str, ":64-bit");
11772         return str;
11773 }
11774
/* Locate the other PCI function of a dual-port chip (e.g. the 5704).
 * Returns the peer's pci_dev, or tp->pdev itself when the chip is
 * configured in single-port mode.  The returned pointer carries no
 * extra reference (see the comment before the final pci_dev_put).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devnr is the devfn of function 0 in this device's slot. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the ref taken by pci_get_slot for empty slots
		 * and for our own function; pci_dev_put(NULL) is a no-op.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
11802
11803 static void __devinit tg3_init_coal(struct tg3 *tp)
11804 {
11805         struct ethtool_coalesce *ec = &tp->coal;
11806
11807         memset(ec, 0, sizeof(*ec));
11808         ec->cmd = ETHTOOL_GCOALESCE;
11809         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11810         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11811         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11812         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11813         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11814         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11815         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11816         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11817         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11818
11819         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11820                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11821                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11822                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11823                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11824                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11825         }
11826
11827         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11828                 ec->rx_coalesce_usecs_irq = 0;
11829                 ec->tx_coalesce_usecs_irq = 0;
11830                 ec->stats_block_coalesce_usecs = 0;
11831         }
11832 }
11833
11834 static int __devinit tg3_init_one(struct pci_dev *pdev,
11835                                   const struct pci_device_id *ent)
11836 {
11837         static int tg3_version_printed = 0;
11838         unsigned long tg3reg_base, tg3reg_len;
11839         struct net_device *dev;
11840         struct tg3 *tp;
11841         int i, err, pm_cap;
11842         char str[40];
11843         u64 dma_mask, persist_dma_mask;
11844
11845         if (tg3_version_printed++ == 0)
11846                 printk(KERN_INFO "%s", version);
11847
11848         err = pci_enable_device(pdev);
11849         if (err) {
11850                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11851                        "aborting.\n");
11852                 return err;
11853         }
11854
11855         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11856                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11857                        "base address, aborting.\n");
11858                 err = -ENODEV;
11859                 goto err_out_disable_pdev;
11860         }
11861
11862         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11863         if (err) {
11864                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11865                        "aborting.\n");
11866                 goto err_out_disable_pdev;
11867         }
11868
11869         pci_set_master(pdev);
11870
11871         /* Find power-management capability. */
11872         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11873         if (pm_cap == 0) {
11874                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11875                        "aborting.\n");
11876                 err = -EIO;
11877                 goto err_out_free_res;
11878         }
11879
11880         tg3reg_base = pci_resource_start(pdev, 0);
11881         tg3reg_len = pci_resource_len(pdev, 0);
11882
11883         dev = alloc_etherdev(sizeof(*tp));
11884         if (!dev) {
11885                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11886                 err = -ENOMEM;
11887                 goto err_out_free_res;
11888         }
11889
11890         SET_NETDEV_DEV(dev, &pdev->dev);
11891
11892 #if TG3_VLAN_TAG_USED
11893         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11894         dev->vlan_rx_register = tg3_vlan_rx_register;
11895 #endif
11896
11897         tp = netdev_priv(dev);
11898         tp->pdev = pdev;
11899         tp->dev = dev;
11900         tp->pm_cap = pm_cap;
11901         tp->mac_mode = TG3_DEF_MAC_MODE;
11902         tp->rx_mode = TG3_DEF_RX_MODE;
11903         tp->tx_mode = TG3_DEF_TX_MODE;
11904         tp->mi_mode = MAC_MI_MODE_BASE;
11905         if (tg3_debug > 0)
11906                 tp->msg_enable = tg3_debug;
11907         else
11908                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11909
11910         /* The word/byte swap controls here control register access byte
11911          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11912          * setting below.
11913          */
11914         tp->misc_host_ctrl =
11915                 MISC_HOST_CTRL_MASK_PCI_INT |
11916                 MISC_HOST_CTRL_WORD_SWAP |
11917                 MISC_HOST_CTRL_INDIR_ACCESS |
11918                 MISC_HOST_CTRL_PCISTATE_RW;
11919
11920         /* The NONFRM (non-frame) byte/word swap controls take effect
11921          * on descriptor entries, anything which isn't packet data.
11922          *
11923          * The StrongARM chips on the board (one for tx, one for rx)
11924          * are running in big-endian mode.
11925          */
11926         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11927                         GRC_MODE_WSWAP_NONFRM_DATA);
11928 #ifdef __BIG_ENDIAN
11929         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11930 #endif
11931         spin_lock_init(&tp->lock);
11932         spin_lock_init(&tp->indirect_lock);
11933         INIT_WORK(&tp->reset_task, tg3_reset_task);
11934
11935         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11936         if (!tp->regs) {
11937                 printk(KERN_ERR PFX "Cannot map device registers, "
11938                        "aborting.\n");
11939                 err = -ENOMEM;
11940                 goto err_out_free_dev;
11941         }
11942
11943         tg3_init_link_config(tp);
11944
11945         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11946         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11947         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11948
11949         dev->open = tg3_open;
11950         dev->stop = tg3_close;
11951         dev->get_stats = tg3_get_stats;
11952         dev->set_multicast_list = tg3_set_rx_mode;
11953         dev->set_mac_address = tg3_set_mac_addr;
11954         dev->do_ioctl = tg3_ioctl;
11955         dev->tx_timeout = tg3_tx_timeout;
11956         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
11957         dev->ethtool_ops = &tg3_ethtool_ops;
11958         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11959         dev->change_mtu = tg3_change_mtu;
11960         dev->irq = pdev->irq;
11961 #ifdef CONFIG_NET_POLL_CONTROLLER
11962         dev->poll_controller = tg3_poll_controller;
11963 #endif
11964
11965         err = tg3_get_invariants(tp);
11966         if (err) {
11967                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11968                        "aborting.\n");
11969                 goto err_out_iounmap;
11970         }
11971
11972         /* The EPB bridge inside 5714, 5715, and 5780 and any
11973          * device behind the EPB cannot support DMA addresses > 40-bit.
11974          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11975          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11976          * do DMA address check in tg3_start_xmit().
11977          */
11978         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11979                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11980         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11981                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11982 #ifdef CONFIG_HIGHMEM
11983                 dma_mask = DMA_64BIT_MASK;
11984 #endif
11985         } else
11986                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11987
11988         /* Configure DMA attributes. */
11989         if (dma_mask > DMA_32BIT_MASK) {
11990                 err = pci_set_dma_mask(pdev, dma_mask);
11991                 if (!err) {
11992                         dev->features |= NETIF_F_HIGHDMA;
11993                         err = pci_set_consistent_dma_mask(pdev,
11994                                                           persist_dma_mask);
11995                         if (err < 0) {
11996                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11997                                        "DMA for consistent allocations\n");
11998                                 goto err_out_iounmap;
11999                         }
12000                 }
12001         }
12002         if (err || dma_mask == DMA_32BIT_MASK) {
12003                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12004                 if (err) {
12005                         printk(KERN_ERR PFX "No usable DMA configuration, "
12006                                "aborting.\n");
12007                         goto err_out_iounmap;
12008                 }
12009         }
12010
12011         tg3_init_bufmgr_config(tp);
12012
12013         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12014                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12015         }
12016         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12017             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12018             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12019             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12020             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12021                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12022         } else {
12023                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12024         }
12025
12026         /* TSO is on by default on chips that support hardware TSO.
12027          * Firmware TSO on older chips gives lower performance, so it
12028          * is off by default, but can be enabled using ethtool.
12029          */
12030         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12031                 dev->features |= NETIF_F_TSO;
12032                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12033                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12034                         dev->features |= NETIF_F_TSO6;
12035         }
12036
12037
12038         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12039             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12040             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12041                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12042                 tp->rx_pending = 63;
12043         }
12044
12045         err = tg3_get_device_address(tp);
12046         if (err) {
12047                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12048                        "aborting.\n");
12049                 goto err_out_iounmap;
12050         }
12051
12052         /*
12053          * Reset chip in case UNDI or EFI driver did not shutdown
12054          * DMA self test will enable WDMAC and we'll see (spurious)
12055          * pending DMA on the PCI bus at that point.
12056          */
12057         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12058             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12059                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12060                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12061         }
12062
12063         err = tg3_test_dma(tp);
12064         if (err) {
12065                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12066                 goto err_out_iounmap;
12067         }
12068
12069         /* Tigon3 can do ipv4 only... and some chips have buggy
12070          * checksumming.
12071          */
12072         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12073                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12074                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12075                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12076                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
12077                         dev->features |= NETIF_F_IPV6_CSUM;
12078
12079                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12080         } else
12081                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12082
12083         /* flow control autonegotiation is default behavior */
12084         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12085
12086         tg3_init_coal(tp);
12087
12088         pci_set_drvdata(pdev, dev);
12089
12090         err = register_netdev(dev);
12091         if (err) {
12092                 printk(KERN_ERR PFX "Cannot register net device, "
12093                        "aborting.\n");
12094                 goto err_out_iounmap;
12095         }
12096
12097         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12098                dev->name,
12099                tp->board_part_number,
12100                tp->pci_chip_rev_id,
12101                tg3_phy_string(tp),
12102                tg3_bus_string(tp, str),
12103                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12104                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12105                  "10/100/1000Base-T")));
12106
12107         for (i = 0; i < 6; i++)
12108                 printk("%2.2x%c", dev->dev_addr[i],
12109                        i == 5 ? '\n' : ':');
12110
12111         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12112                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12113                dev->name,
12114                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12115                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12116                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12117                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12118                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12119                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12120         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12121                dev->name, tp->dma_rwctrl,
12122                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12123                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12124
12125         return 0;
12126
12127 err_out_iounmap:
12128         if (tp->regs) {
12129                 iounmap(tp->regs);
12130                 tp->regs = NULL;
12131         }
12132
12133 err_out_free_dev:
12134         free_netdev(dev);
12135
12136 err_out_free_res:
12137         pci_release_regions(pdev);
12138
12139 err_out_disable_pdev:
12140         pci_disable_device(pdev);
12141         pci_set_drvdata(pdev, NULL);
12142         return err;
12143 }
12144
12145 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12146 {
12147         struct net_device *dev = pci_get_drvdata(pdev);
12148
12149         if (dev) {
12150                 struct tg3 *tp = netdev_priv(dev);
12151
12152                 flush_scheduled_work();
12153                 unregister_netdev(dev);
12154                 if (tp->regs) {
12155                         iounmap(tp->regs);
12156                         tp->regs = NULL;
12157                 }
12158                 free_netdev(dev);
12159                 pci_release_regions(pdev);
12160                 pci_disable_device(pdev);
12161                 pci_set_drvdata(pdev, NULL);
12162         }
12163 }
12164
/* Legacy PCI suspend hook: quiesce the interface and put the chip into
 * the requested low-power state.  If the power transition fails, the
 * hardware is restarted so the interface remains usable, and the error
 * is returned to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and the data path before touching hardware. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Disable interrupts under the full lock (irq_sync = 1). */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark initialization as torn down. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: restart the hardware, timer
		 * and data path so the device keeps working.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12216
/* Legacy PCI resume hook: restore PCI config state, power the chip
 * back to D0 and, if the interface was running at suspend time,
 * restart the hardware, timer and data path.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic timer that tg3_suspend stopped. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12256
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12265
/* Module load: register the PCI driver; probing happens per device. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module unload: unregister the driver, detaching all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);