/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT 1
#else
#define TG3_TSO_SUPPORT 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.33"
#define DRV_MODULE_RELDATE      "July 5, 2005"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define TX_RING_GAP(TP) \
        (TG3_TX_RING_SIZE - (TP)->tx_pending)
#define TX_BUFFS_AVAIL(TP)                                              \
        (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
          (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
          (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

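/* Editorial note, not part of the original driver: the ring macros above rely
 * on the ring sizes being compile-time powers of two, which is what the
 * comment about replacing '% foo' with '& (foo - 1)' is getting at.  A
 * minimal sketch of the equivalence, using NEXT_TX() as defined above:
 *
 *      NEXT_TX(510) == (510 + 1) & 511 == 511
 *      NEXT_TX(511) == (511 + 1) & 511 == 0    (wraps, same as (511 + 1) % 512)
 *
 * The '& (TG3_TX_RING_SIZE - 1)' form compiles to a single AND, whereas a
 * modulo on a value the compiler cannot prove is a power of two could force
 * a hardware divide.
 */
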
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
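
/* Editorial note, not part of the original driver: every entry above matches
 * on vendor/device only and wildcards the subsystem IDs, so supporting an
 * additional Tigon3 variant normally needs nothing more than one extra row
 * before the terminating "{ 0, }".  A purely hypothetical sketch (the device
 * macro below is made up for illustration, not a real ID):
 *
 *      { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_NEWCHIP,
 *        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 *
 * MODULE_DEVICE_TABLE(pci, ...) exports the table so userspace hotplug
 * tooling can autoload the module when a matching device appears.
 */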

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
                spin_lock_bh(&tp->indirect_lock);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
                spin_unlock_bh(&tp->indirect_lock);
        } else {
                writel(val, tp->regs + off);
                if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
                        readl(tp->regs + off);
        }
}

static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
                spin_lock_bh(&tp->indirect_lock);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
                spin_unlock_bh(&tp->indirect_lock);
        } else {
                void __iomem *dest = tp->regs + off;
                writel(val, dest);
                readl(dest);    /* always flush PCI write */
        }
}

static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

#define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)

#define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
#define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg)               readl(tp->regs + (reg))
#define tr16(reg)               readw(tp->regs + (reg))
#define tr8(reg)                readb(tp->regs + (reg))

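/* Editorial note, not part of the original driver: tw32()/tr32() and friends
 * are how the rest of this file touches MAC registers, and the common idiom
 * is a read-modify-write.  A minimal sketch of that pattern (mirroring what
 * tg3_disable_ints() below does with the real accessors):
 *
 *      u32 val;
 *
 *      val = tr32(TG3PCI_MISC_HOST_CTRL);
 *      tw32(TG3PCI_MISC_HOST_CTRL, val | MISC_HOST_CTRL_MASK_PCI_INT);
 *
 * tw32() routes through tg3_write_indirect_reg32() so the PCIX target
 * hardware bug workaround is applied transparently; tw32_f() additionally
 * reads the register back to flush the posted PCI write.
 */
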
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        spin_lock_bh(&tp->indirect_lock);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_bh(&tp->indirect_lock);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        spin_lock_bh(&tp->indirect_lock);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

        /* Always leave this as zero. */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        spin_unlock_bh(&tp->indirect_lock);
}

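/* Editorial note, not part of the original driver: tg3_write_mem() and
 * tg3_read_mem() reach NIC-local SRAM through the PCI memory window config
 * registers, which is why both take indirect_lock and restore the window
 * base to zero afterwards.  A minimal usage sketch (assuming the
 * NIC_SRAM_DATA_SIG offset and NIC_SRAM_DATA_SIG_MAGIC value from tg3.h):
 *
 *      u32 val;
 *
 *      tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 *      if (val == NIC_SRAM_DATA_SIG_MAGIC)
 *              ... SRAM configuration block is valid ...
 */
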
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (tp->hw_status->status & SD_STATUS_UPDATED)
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     (tp->last_tag << 24));
        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
        tg3_cond_int(tp);
}

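/* Editorial note, not part of the original driver: interrupts are masked and
 * unmasked through MAILBOX_INTERRUPT_0 (writing 1 masks, writing the last
 * status tag shifted into the high byte with the low bit clear unmasks), and
 * the trailing tr32() of the mailbox flushes the posted write.  The routines
 * above are typically used as a simple bracket around work that must not
 * race the interrupt handler, roughly:
 *
 *      tg3_disable_ints(tp);
 *      ... reprogram hardware ...
 *      tg3_enable_ints(tp);
 *
 * tg3_enable_ints() also calls tg3_cond_int() so that a status block update
 * which arrived while masked still raises an interrupt.
 */
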
static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
                (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        netif_poll_disable(tp->dev);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        netif_poll_enable(tp->dev);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_f(TG3PCI_CLOCK_CTRL,
                               clock_ctrl | CLOCK_CTRL_625_CORE);
                        udelay(40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl |
                     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
                udelay(40);
                tw32_f(TG3PCI_CLOCK_CTRL,
                     clock_ctrl | (CLOCK_CTRL_ALTCLK));
                udelay(40);
        }
        tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
        udelay(40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

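/* Editorial note, not part of the original driver: MII accesses go through
 * the MAC_MI_COM register and poll MI_COM_BUSY for up to PHY_BUSY_LOOPS
 * iterations, so both helpers can fail with -EBUSY.  Callers therefore check
 * the return value and only perform the write-back when the read succeeded.
 * A read-modify-write sketch (the same shape as tg3_phy_set_wirespeed()
 * below, with SOME_BIT standing in for whatever feature bit is wanted):
 *
 *      u32 val;
 *
 *      if (!tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
 *              tg3_writephy(tp, MII_TG3_AUX_CTRL, val | SOME_BIT);
 */
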
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, 0x16, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        tg3_phy_set_wirespeed(tp);
        return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                tp_peer = pci_get_drvdata(tp->pdev_peer);
                if (!tp_peer)
                        BUG();
        }


        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl;

                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                grc_local_ctrl);
                        udelay(100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                grc_local_ctrl);
                        udelay(100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                       grc_local_ctrl);
                                udelay(100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                }
        }
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

static int tg3_set_power_state(struct tg3 *tp, int state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case 0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is not a LOM */
                if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                        udelay(100);
                }

                return 0;

        case 1:
                power_control |= 1;
                break;

        case 2:
                power_control |= 2;
                break;

        case 3:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        mac_mode = MAC_MODE_PORT_MODE_MII;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                        } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_f(TG3PCI_CLOCK_CTRL, base_val |
                     CLOCK_CTRL_ALTCLK |
                     CLOCK_CTRL_PWRDOWN_PLL133);
                udelay(40);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                /* do nothing */
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
                udelay(40);

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
                udelay(40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_f(TG3PCI_CLOCK_CTRL,
                                         tp->pci_clock_ctrl | newbits3);
                        udelay(40);
                }
        }

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                        tg3_halt_cpu(tp, RX_CPU_BASE);
        }

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
        udelay(100);    /* Delay after power state change */

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       tp->dev->name,
                       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
                       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
        u32 new_tg3_flags = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
                if (local_adv & ADVERTISE_PAUSE_CAP) {
                        if (local_adv & ADVERTISE_PAUSE_ASYM) {
                                if (remote_adv & LPA_PAUSE_CAP)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE |
                                                TG3_FLAG_TX_PAUSE);
                                else if (remote_adv & LPA_PAUSE_ASYM)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE);
                        } else {
                                if (remote_adv & LPA_PAUSE_CAP)
                                        new_tg3_flags |=
                                                (TG3_FLAG_RX_PAUSE |
                                                TG3_FLAG_TX_PAUSE);
                        }
                } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                        if ((remote_adv & LPA_PAUSE_CAP) &&
                        (remote_adv & LPA_PAUSE_ASYM))
                                new_tg3_flags |= TG3_FLAG_TX_PAUSE;
                }

                tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
                tp->tg3_flags |= new_tg3_flags;
        } else {
                new_tg3_flags = tp->tg3_flags;
        }

        if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode) {
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode) {
                tw32_f(MAC_TX_MODE, tp->tx_mode);
        }
}

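/* Editorial note, not part of the original driver: when pause autonegotiation
 * is enabled, the nested ifs above resolve the 802.3x pause advertisement
 * bits as follows:
 *
 *      local CAP,        remote CAP            -> RX pause and TX pause
 *      local CAP+ASYM,   remote ASYM only      -> RX pause only
 *      local ASYM only,  remote CAP+ASYM       -> TX pause only
 *      anything else                           -> no pause
 *
 * The resolved flags are then mirrored into RX_MODE_FLOW_CTRL_ENABLE and
 * TX_MODE_FLOW_CTRL_ENABLE, and the MAC RX/TX mode registers are rewritten
 * only when the mode actually changed.
 */
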
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
                *speed = SPEED_10;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_10FULL:
                *speed = SPEED_10;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_100HALF:
                *speed = SPEED_100;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_100FULL:
                *speed = SPEED_100;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_1000HALF:
                *speed = SPEED_1000;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_1000FULL:
                *speed = SPEED_1000;
                *duplex = DUPLEX_FULL;
                break;

        default:
                *speed = SPEED_INVALID;
                *duplex = DUPLEX_INVALID;
                break;
        };
}

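/* Editorial note, not part of the original driver: a minimal usage sketch of
 * the helper above, decoding the PHY's auxiliary status register into the
 * same types used by the link_config fields:
 *
 *      u32 aux_stat;
 *      u16 speed;
 *      u8 duplex;
 *
 *      if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat)) {
 *              tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
 *              ... speed/duplex now hold SPEED_* / DUPLEX_* values ...
 *      }
 */
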
1336 static void tg3_phy_copper_begin(struct tg3 *tp)
1337 {
1338         u32 new_adv;
1339         int i;
1340
1341         if (tp->link_config.phy_is_low_power) {
1342                 /* Entering low power mode.  Disable gigabit and
1343                  * 100baseT advertisements.
1344                  */
1345                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1346
1347                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1348                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1349                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1350                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1351
1352                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1353         } else if (tp->link_config.speed == SPEED_INVALID) {
1354                 tp->link_config.advertising =
1355                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1356                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1357                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1358                          ADVERTISED_Autoneg | ADVERTISED_MII);
1359
1360                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1361                         tp->link_config.advertising &=
1362                                 ~(ADVERTISED_1000baseT_Half |
1363                                   ADVERTISED_1000baseT_Full);
1364
1365                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1366                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1367                         new_adv |= ADVERTISE_10HALF;
1368                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1369                         new_adv |= ADVERTISE_10FULL;
1370                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1371                         new_adv |= ADVERTISE_100HALF;
1372                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1373                         new_adv |= ADVERTISE_100FULL;
1374                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1375
1376                 if (tp->link_config.advertising &
1377                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1378                         new_adv = 0;
1379                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1380                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1381                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1382                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1383                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1384                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1385                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1386                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1387                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1388                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1389                 } else {
1390                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1391                 }
1392         } else {
1393                 /* Asking for a specific link mode. */
1394                 if (tp->link_config.speed == SPEED_1000) {
1395                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1396                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1397
1398                         if (tp->link_config.duplex == DUPLEX_FULL)
1399                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1400                         else
1401                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1402                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1403                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1404                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1405                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1406                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1407                 } else {
1408                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1409
1410                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1411                         if (tp->link_config.speed == SPEED_100) {
1412                                 if (tp->link_config.duplex == DUPLEX_FULL)
1413                                         new_adv |= ADVERTISE_100FULL;
1414                                 else
1415                                         new_adv |= ADVERTISE_100HALF;
1416                         } else {
1417                                 if (tp->link_config.duplex == DUPLEX_FULL)
1418                                         new_adv |= ADVERTISE_10FULL;
1419                                 else
1420                                         new_adv |= ADVERTISE_10HALF;
1421                         }
1422                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1423                 }
1424         }
1425
1426         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1427             tp->link_config.speed != SPEED_INVALID) {
1428                 u32 bmcr, orig_bmcr;
1429
1430                 tp->link_config.active_speed = tp->link_config.speed;
1431                 tp->link_config.active_duplex = tp->link_config.duplex;
1432
1433                 bmcr = 0;
1434                 switch (tp->link_config.speed) {
1435                 default:
1436                 case SPEED_10:
1437                         break;
1438
1439                 case SPEED_100:
1440                         bmcr |= BMCR_SPEED100;
1441                         break;
1442
1443                 case SPEED_1000:
1444                         bmcr |= TG3_BMCR_SPEED1000;
1445                         break;
1446                 }
1447
1448                 if (tp->link_config.duplex == DUPLEX_FULL)
1449                         bmcr |= BMCR_FULLDPLX;
1450
1451                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1452                     (bmcr != orig_bmcr)) {
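                        /* Briefly force the PHY into loopback to drop the
                         * link, wait for BMSR to report link down, then
                         * program the new forced BMCR value.
                         */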
1453                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1454                         for (i = 0; i < 1500; i++) {
1455                                 u32 tmp;
1456
1457                                 udelay(10);
1458                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1459                                     tg3_readphy(tp, MII_BMSR, &tmp))
1460                                         continue;
1461                                 if (!(tmp & BMSR_LSTATUS)) {
1462                                         udelay(40);
1463                                         break;
1464                                 }
1465                         }
1466                         tg3_writephy(tp, MII_BMCR, bmcr);
1467                         udelay(40);
1468                 }
1469         } else {
1470                 tg3_writephy(tp, MII_BMCR,
1471                              BMCR_ANENABLE | BMCR_ANRESTART);
1472         }
1473 }
1474
1475 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1476 {
1477         int err;
1478
1479         /* Turn off tap power management. */
1480         /* Set Extended packet length bit */
1481         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1482
1483         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1484         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1485
1486         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1487         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1488
1489         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1490         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1491
1492         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1493         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1494
1495         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1496         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1497
1498         udelay(40);
1499
1500         return err;
1501 }
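/* The writes above all follow the same two-step pattern: select a DSP
 * register through MII_TG3_DSP_ADDRESS, then write its value through
 * MII_TG3_DSP_RW_PORT.  A hypothetical wrapper along these lines
 * (shown purely as a sketch; it is not used elsewhere in this file)
 * captures the idiom:
 */
#if 0
static int tg3_dsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err  = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}
#endif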
1502
1503 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1504 {
1505         u32 adv_reg, all_mask;
1506
1507         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1508                 return 0;
1509
1510         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1511                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1512         if ((adv_reg & all_mask) != all_mask)
1513                 return 0;
1514         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1515                 u32 tg3_ctrl;
1516
1517                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1518                         return 0;
1519
1520                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1521                             MII_TG3_CTRL_ADV_1000_FULL);
1522                 if ((tg3_ctrl & all_mask) != all_mask)
1523                         return 0;
1524         }
1525         return 1;
1526 }
1527
1528 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1529 {
1530         int current_link_up;
1531         u32 bmsr, dummy;
1532         u16 current_speed;
1533         u8 current_duplex;
1534         int i, err;
1535
1536         tw32(MAC_EVENT, 0);
1537
1538         tw32_f(MAC_STATUS,
1539              (MAC_STATUS_SYNC_CHANGED |
1540               MAC_STATUS_CFG_CHANGED |
1541               MAC_STATUS_MI_COMPLETION |
1542               MAC_STATUS_LNKSTATE_CHANGED));
1543         udelay(40);
1544
1545         tp->mi_mode = MAC_MI_MODE_BASE;
1546         tw32_f(MAC_MI_MODE, tp->mi_mode);
1547         udelay(80);
1548
1549         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1550
1551         /* Some third-party PHYs need to be reset on link going
1552          * down.
1553          */
1554         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1555              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1556              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1557             netif_carrier_ok(tp->dev)) {
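                /* BMSR latches link failures, so read it twice to get
                 * the current link state.
                 */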
1558                 tg3_readphy(tp, MII_BMSR, &bmsr);
1559                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1560                     !(bmsr & BMSR_LSTATUS))
1561                         force_reset = 1;
1562         }
1563         if (force_reset)
1564                 tg3_phy_reset(tp);
1565
1566         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1567                 tg3_readphy(tp, MII_BMSR, &bmsr);
1568                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1569                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1570                         bmsr = 0;
1571
1572                 if (!(bmsr & BMSR_LSTATUS)) {
1573                         err = tg3_init_5401phy_dsp(tp);
1574                         if (err)
1575                                 return err;
1576
1577                         tg3_readphy(tp, MII_BMSR, &bmsr);
1578                         for (i = 0; i < 1000; i++) {
1579                                 udelay(10);
1580                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1581                                     (bmsr & BMSR_LSTATUS)) {
1582                                         udelay(40);
1583                                         break;
1584                                 }
1585                         }
1586
1587                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1588                             !(bmsr & BMSR_LSTATUS) &&
1589                             tp->link_config.active_speed == SPEED_1000) {
1590                                 err = tg3_phy_reset(tp);
1591                                 if (!err)
1592                                         err = tg3_init_5401phy_dsp(tp);
1593                                 if (err)
1594                                         return err;
1595                         }
1596                 }
1597         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1598                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1599                 /* 5701 {A0,B0} CRC bug workaround */
1600                 tg3_writephy(tp, 0x15, 0x0a75);
1601                 tg3_writephy(tp, 0x1c, 0x8c68);
1602                 tg3_writephy(tp, 0x1c, 0x8d68);
1603                 tg3_writephy(tp, 0x1c, 0x8c68);
1604         }
1605
1606         /* Clear pending interrupts... */
1607         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1608         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1609
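        /* Leave only the PHY link-change interrupt unmasked when the MI
         * interrupt is in use; otherwise mask all PHY interrupt sources.
         */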
1610         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1611                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1612         else
1613                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1614
1615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1616             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1617                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1618                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1619                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1620                 else
1621                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1622         }
1623
1624         current_link_up = 0;
1625         current_speed = SPEED_INVALID;
1626         current_duplex = DUPLEX_INVALID;
1627
1628         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1629                 u32 val;
1630
1631                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1632                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1633                 if (!(val & (1 << 10))) {
1634                         val |= (1 << 10);
1635                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1636                         goto relink;
1637                 }
1638         }
1639
1640         bmsr = 0;
1641         for (i = 0; i < 100; i++) {
1642                 tg3_readphy(tp, MII_BMSR, &bmsr);
1643                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1644                     (bmsr & BMSR_LSTATUS))
1645                         break;
1646                 udelay(40);
1647         }
1648
1649         if (bmsr & BMSR_LSTATUS) {
1650                 u32 aux_stat, bmcr;
1651
1652                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1653                 for (i = 0; i < 2000; i++) {
1654                         udelay(10);
1655                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1656                             aux_stat)
1657                                 break;
1658                 }
1659
1660                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1661                                              &current_speed,
1662                                              &current_duplex);
1663
1664                 bmcr = 0;
1665                 for (i = 0; i < 200; i++) {
1666                         tg3_readphy(tp, MII_BMCR, &bmcr);
1667                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1668                                 continue;
1669                         if (bmcr && bmcr != 0x7fff)
1670                                 break;
1671                         udelay(10);
1672                 }
1673
1674                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1675                         if (bmcr & BMCR_ANENABLE) {
1676                                 current_link_up = 1;
1677
1678                                 /* Force autoneg restart if we are exiting
1679                                  * low power mode.
1680                                  */
1681                                 if (!tg3_copper_is_advertising_all(tp))
1682                                         current_link_up = 0;
1683                         } else {
1684                                 current_link_up = 0;
1685                         }
1686                 } else {
1687                         if (!(bmcr & BMCR_ANENABLE) &&
1688                             tp->link_config.speed == current_speed &&
1689                             tp->link_config.duplex == current_duplex) {
1690                                 current_link_up = 1;
1691                         } else {
1692                                 current_link_up = 0;
1693                         }
1694                 }
1695
1696                 tp->link_config.active_speed = current_speed;
1697                 tp->link_config.active_duplex = current_duplex;
1698         }
1699
1700         if (current_link_up == 1 &&
1701             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1702             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1703                 u32 local_adv, remote_adv;
1704
1705                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1706                         local_adv = 0;
1707                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1708
1709                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1710                         remote_adv = 0;
1711
1712                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1713
1714                 /* If we are not advertising full pause capability,
1715                  * something is wrong.  Bring the link down and reconfigure.
1716                  */
1717                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1718                         current_link_up = 0;
1719                 } else {
1720                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1721                 }
1722         }
1723 relink:
1724         if (current_link_up == 0) {
1725                 u32 tmp;
1726
1727                 tg3_phy_copper_begin(tp);
1728
1729                 tg3_readphy(tp, MII_BMSR, &tmp);
1730                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1731                     (tmp & BMSR_LSTATUS))
1732                         current_link_up = 1;
1733         }
1734
1735         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1736         if (current_link_up == 1) {
1737                 if (tp->link_config.active_speed == SPEED_100 ||
1738                     tp->link_config.active_speed == SPEED_10)
1739                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1740                 else
1741                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1742         } else
1743                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1744
1745         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1746         if (tp->link_config.active_duplex == DUPLEX_HALF)
1747                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1748
1749         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1750         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1751                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1752                     (current_link_up == 1 &&
1753                      tp->link_config.active_speed == SPEED_10))
1754                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1755         } else {
1756                 if (current_link_up == 1)
1757                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1758         }
1759
1760         /* ??? Without this setting Netgear GA302T PHY does not
1761          * ??? send/receive packets...
1762          */
1763         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1764             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1765                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1766                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1767                 udelay(80);
1768         }
1769
1770         tw32_f(MAC_MODE, tp->mac_mode);
1771         udelay(40);
1772
1773         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1774                 /* Polled via timer. */
1775                 tw32_f(MAC_EVENT, 0);
1776         } else {
1777                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1778         }
1779         udelay(40);
1780
1781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1782             current_link_up == 1 &&
1783             tp->link_config.active_speed == SPEED_1000 &&
1784             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1785              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1786                 udelay(120);
1787                 tw32_f(MAC_STATUS,
1788                      (MAC_STATUS_SYNC_CHANGED |
1789                       MAC_STATUS_CFG_CHANGED));
1790                 udelay(40);
1791                 tg3_write_mem(tp,
1792                               NIC_SRAM_FIRMWARE_MBOX,
1793                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1794         }
1795
1796         if (current_link_up != netif_carrier_ok(tp->dev)) {
1797                 if (current_link_up)
1798                         netif_carrier_on(tp->dev);
1799                 else
1800                         netif_carrier_off(tp->dev);
1801                 tg3_link_report(tp);
1802         }
1803
1804         return 0;
1805 }
1806
1807 struct tg3_fiber_aneginfo {
1808         int state;
1809 #define ANEG_STATE_UNKNOWN              0
1810 #define ANEG_STATE_AN_ENABLE            1
1811 #define ANEG_STATE_RESTART_INIT         2
1812 #define ANEG_STATE_RESTART              3
1813 #define ANEG_STATE_DISABLE_LINK_OK      4
1814 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1815 #define ANEG_STATE_ABILITY_DETECT       6
1816 #define ANEG_STATE_ACK_DETECT_INIT      7
1817 #define ANEG_STATE_ACK_DETECT           8
1818 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1819 #define ANEG_STATE_COMPLETE_ACK         10
1820 #define ANEG_STATE_IDLE_DETECT_INIT     11
1821 #define ANEG_STATE_IDLE_DETECT          12
1822 #define ANEG_STATE_LINK_OK              13
1823 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1824 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1825
1826         u32 flags;
1827 #define MR_AN_ENABLE            0x00000001
1828 #define MR_RESTART_AN           0x00000002
1829 #define MR_AN_COMPLETE          0x00000004
1830 #define MR_PAGE_RX              0x00000008
1831 #define MR_NP_LOADED            0x00000010
1832 #define MR_TOGGLE_TX            0x00000020
1833 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1834 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1835 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1836 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1837 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1838 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1839 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1840 #define MR_TOGGLE_RX            0x00002000
1841 #define MR_NP_RX                0x00004000
1842
1843 #define MR_LINK_OK              0x80000000
1844
1845         unsigned long link_time, cur_time;
1846
1847         u32 ability_match_cfg;
1848         int ability_match_count;
1849
1850         char ability_match, idle_match, ack_match;
1851
1852         u32 txconfig, rxconfig;
1853 #define ANEG_CFG_NP             0x00000080
1854 #define ANEG_CFG_ACK            0x00000040
1855 #define ANEG_CFG_RF2            0x00000020
1856 #define ANEG_CFG_RF1            0x00000010
1857 #define ANEG_CFG_PS2            0x00000001
1858 #define ANEG_CFG_PS1            0x00008000
1859 #define ANEG_CFG_HD             0x00004000
1860 #define ANEG_CFG_FD             0x00002000
1861 #define ANEG_CFG_INVAL          0x00001f06
1862
1863 };
1864 #define ANEG_OK         0
1865 #define ANEG_DONE       1
1866 #define ANEG_TIMER_ENAB 2
1867 #define ANEG_FAILED     -1
1868
1869 #define ANEG_STATE_SETTLE_TIME  10000
1870
1871 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1872                                    struct tg3_fiber_aneginfo *ap)
1873 {
1874         unsigned long delta;
1875         u32 rx_cfg_reg;
1876         int ret;
1877
1878         if (ap->state == ANEG_STATE_UNKNOWN) {
1879                 ap->rxconfig = 0;
1880                 ap->link_time = 0;
1881                 ap->cur_time = 0;
1882                 ap->ability_match_cfg = 0;
1883                 ap->ability_match_count = 0;
1884                 ap->ability_match = 0;
1885                 ap->idle_match = 0;
1886                 ap->ack_match = 0;
1887         }
1888         ap->cur_time++;
1889
1890         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1891                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1892
1893                 if (rx_cfg_reg != ap->ability_match_cfg) {
1894                         ap->ability_match_cfg = rx_cfg_reg;
1895                         ap->ability_match = 0;
1896                         ap->ability_match_count = 0;
1897                 } else {
1898                         if (++ap->ability_match_count > 1) {
1899                                 ap->ability_match = 1;
1900                                 ap->ability_match_cfg = rx_cfg_reg;
1901                         }
1902                 }
1903                 if (rx_cfg_reg & ANEG_CFG_ACK)
1904                         ap->ack_match = 1;
1905                 else
1906                         ap->ack_match = 0;
1907
1908                 ap->idle_match = 0;
1909         } else {
1910                 ap->idle_match = 1;
1911                 ap->ability_match_cfg = 0;
1912                 ap->ability_match_count = 0;
1913                 ap->ability_match = 0;
1914                 ap->ack_match = 0;
1915
1916                 rx_cfg_reg = 0;
1917         }
1918
1919         ap->rxconfig = rx_cfg_reg;
1920         ret = ANEG_OK;
1921
1922         switch(ap->state) {
1923         case ANEG_STATE_UNKNOWN:
1924                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1925                         ap->state = ANEG_STATE_AN_ENABLE;
1926
1927                 /* fallthru */
1928         case ANEG_STATE_AN_ENABLE:
1929                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1930                 if (ap->flags & MR_AN_ENABLE) {
1931                         ap->link_time = 0;
1932                         ap->cur_time = 0;
1933                         ap->ability_match_cfg = 0;
1934                         ap->ability_match_count = 0;
1935                         ap->ability_match = 0;
1936                         ap->idle_match = 0;
1937                         ap->ack_match = 0;
1938
1939                         ap->state = ANEG_STATE_RESTART_INIT;
1940                 } else {
1941                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1942                 }
1943                 break;
1944
1945         case ANEG_STATE_RESTART_INIT:
1946                 ap->link_time = ap->cur_time;
1947                 ap->flags &= ~(MR_NP_LOADED);
1948                 ap->txconfig = 0;
1949                 tw32(MAC_TX_AUTO_NEG, 0);
1950                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1951                 tw32_f(MAC_MODE, tp->mac_mode);
1952                 udelay(40);
1953
1954                 ret = ANEG_TIMER_ENAB;
1955                 ap->state = ANEG_STATE_RESTART;
1956
1957                 /* fallthru */
1958         case ANEG_STATE_RESTART:
1959                 delta = ap->cur_time - ap->link_time;
1960                 if (delta > ANEG_STATE_SETTLE_TIME) {
1961                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1962                 } else {
1963                         ret = ANEG_TIMER_ENAB;
1964                 }
1965                 break;
1966
1967         case ANEG_STATE_DISABLE_LINK_OK:
1968                 ret = ANEG_DONE;
1969                 break;
1970
1971         case ANEG_STATE_ABILITY_DETECT_INIT:
1972                 ap->flags &= ~(MR_TOGGLE_TX);
1973                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1974                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1975                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1976                 tw32_f(MAC_MODE, tp->mac_mode);
1977                 udelay(40);
1978
1979                 ap->state = ANEG_STATE_ABILITY_DETECT;
1980                 break;
1981
1982         case ANEG_STATE_ABILITY_DETECT:
1983                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1984                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1985                 }
1986                 break;
1987
1988         case ANEG_STATE_ACK_DETECT_INIT:
1989                 ap->txconfig |= ANEG_CFG_ACK;
1990                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1991                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1992                 tw32_f(MAC_MODE, tp->mac_mode);
1993                 udelay(40);
1994
1995                 ap->state = ANEG_STATE_ACK_DETECT;
1996
1997                 /* fallthru */
1998         case ANEG_STATE_ACK_DETECT:
1999                 if (ap->ack_match != 0) {
2000                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2001                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2002                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2003                         } else {
2004                                 ap->state = ANEG_STATE_AN_ENABLE;
2005                         }
2006                 } else if (ap->ability_match != 0 &&
2007                            ap->rxconfig == 0) {
2008                         ap->state = ANEG_STATE_AN_ENABLE;
2009                 }
2010                 break;
2011
2012         case ANEG_STATE_COMPLETE_ACK_INIT:
2013                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2014                         ret = ANEG_FAILED;
2015                         break;
2016                 }
2017                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2018                                MR_LP_ADV_HALF_DUPLEX |
2019                                MR_LP_ADV_SYM_PAUSE |
2020                                MR_LP_ADV_ASYM_PAUSE |
2021                                MR_LP_ADV_REMOTE_FAULT1 |
2022                                MR_LP_ADV_REMOTE_FAULT2 |
2023                                MR_LP_ADV_NEXT_PAGE |
2024                                MR_TOGGLE_RX |
2025                                MR_NP_RX);
2026                 if (ap->rxconfig & ANEG_CFG_FD)
2027                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2028                 if (ap->rxconfig & ANEG_CFG_HD)
2029                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2030                 if (ap->rxconfig & ANEG_CFG_PS1)
2031                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2032                 if (ap->rxconfig & ANEG_CFG_PS2)
2033                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2034                 if (ap->rxconfig & ANEG_CFG_RF1)
2035                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2036                 if (ap->rxconfig & ANEG_CFG_RF2)
2037                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2038                 if (ap->rxconfig & ANEG_CFG_NP)
2039                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2040
2041                 ap->link_time = ap->cur_time;
2042
2043                 ap->flags ^= (MR_TOGGLE_TX);
2044                 if (ap->rxconfig & 0x0008)
2045                         ap->flags |= MR_TOGGLE_RX;
2046                 if (ap->rxconfig & ANEG_CFG_NP)
2047                         ap->flags |= MR_NP_RX;
2048                 ap->flags |= MR_PAGE_RX;
2049
2050                 ap->state = ANEG_STATE_COMPLETE_ACK;
2051                 ret = ANEG_TIMER_ENAB;
2052                 break;
2053
2054         case ANEG_STATE_COMPLETE_ACK:
2055                 if (ap->ability_match != 0 &&
2056                     ap->rxconfig == 0) {
2057                         ap->state = ANEG_STATE_AN_ENABLE;
2058                         break;
2059                 }
2060                 delta = ap->cur_time - ap->link_time;
2061                 if (delta > ANEG_STATE_SETTLE_TIME) {
2062                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2063                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2064                         } else {
2065                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2066                                     !(ap->flags & MR_NP_RX)) {
2067                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2068                                 } else {
2069                                         ret = ANEG_FAILED;
2070                                 }
2071                         }
2072                 }
2073                 break;
2074
2075         case ANEG_STATE_IDLE_DETECT_INIT:
2076                 ap->link_time = ap->cur_time;
2077                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2078                 tw32_f(MAC_MODE, tp->mac_mode);
2079                 udelay(40);
2080
2081                 ap->state = ANEG_STATE_IDLE_DETECT;
2082                 ret = ANEG_TIMER_ENAB;
2083                 break;
2084
2085         case ANEG_STATE_IDLE_DETECT:
2086                 if (ap->ability_match != 0 &&
2087                     ap->rxconfig == 0) {
2088                         ap->state = ANEG_STATE_AN_ENABLE;
2089                         break;
2090                 }
2091                 delta = ap->cur_time - ap->link_time;
2092                 if (delta > ANEG_STATE_SETTLE_TIME) {
2093                         /* XXX another gem from the Broadcom driver :( */
2094                         ap->state = ANEG_STATE_LINK_OK;
2095                 }
2096                 break;
2097
2098         case ANEG_STATE_LINK_OK:
2099                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2100                 ret = ANEG_DONE;
2101                 break;
2102
2103         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2104                 /* ??? unimplemented */
2105                 break;
2106
2107         case ANEG_STATE_NEXT_PAGE_WAIT:
2108                 /* ??? unimplemented */
2109                 break;
2110
2111         default:
2112                 ret = ANEG_FAILED;
2113                 break;
2114         }
2115
2116         return ret;
2117 }
2118
2119 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2120 {
2121         int res = 0;
2122         struct tg3_fiber_aneginfo aninfo;
2123         int status = ANEG_FAILED;
2124         unsigned int tick;
2125         u32 tmp;
2126
2127         tw32_f(MAC_TX_AUTO_NEG, 0);
2128
2129         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2130         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2131         udelay(40);
2132
2133         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2134         udelay(40);
2135
2136         memset(&aninfo, 0, sizeof(aninfo));
2137         aninfo.flags |= MR_AN_ENABLE;
2138         aninfo.state = ANEG_STATE_UNKNOWN;
2139         aninfo.cur_time = 0;
2140         tick = 0;
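        /* Step the software autoneg state machine roughly once per
         * microsecond, for at most ~195 ms, until it finishes or fails.
         */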
2141         while (++tick < 195000) {
2142                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2143                 if (status == ANEG_DONE || status == ANEG_FAILED)
2144                         break;
2145
2146                 udelay(1);
2147         }
2148
2149         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2150         tw32_f(MAC_MODE, tp->mac_mode);
2151         udelay(40);
2152
2153         *flags = aninfo.flags;
2154
2155         if (status == ANEG_DONE &&
2156             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2157                              MR_LP_ADV_FULL_DUPLEX)))
2158                 res = 1;
2159
2160         return res;
2161 }
2162
2163 static void tg3_init_bcm8002(struct tg3 *tp)
2164 {
2165         u32 mac_status = tr32(MAC_STATUS);
2166         int i;
2167
2168         /* Reset when initializing for the first time or when we have a link. */
2169         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2170             !(mac_status & MAC_STATUS_PCS_SYNCED))
2171                 return;
2172
2173         /* Set PLL lock range. */
2174         tg3_writephy(tp, 0x16, 0x8007);
2175
2176         /* SW reset */
2177         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2178
2179         /* Wait for reset to complete. */
2180         /* XXX schedule_timeout() ... */
2181         for (i = 0; i < 500; i++)
2182                 udelay(10);
2183
2184         /* Config mode; select PMA/Ch 1 regs. */
2185         tg3_writephy(tp, 0x10, 0x8411);
2186
2187         /* Enable auto-lock and comdet, select txclk for tx. */
2188         tg3_writephy(tp, 0x11, 0x0a10);
2189
2190         tg3_writephy(tp, 0x18, 0x00a0);
2191         tg3_writephy(tp, 0x16, 0x41ff);
2192
2193         /* Assert and deassert POR. */
2194         tg3_writephy(tp, 0x13, 0x0400);
2195         udelay(40);
2196         tg3_writephy(tp, 0x13, 0x0000);
2197
2198         tg3_writephy(tp, 0x11, 0x0a50);
2199         udelay(40);
2200         tg3_writephy(tp, 0x11, 0x0a10);
2201
2202         /* Wait for signal to stabilize */
2203         /* XXX schedule_timeout() ... */
2204         for (i = 0; i < 15000; i++)
2205                 udelay(10);
2206
2207         /* Deselect the channel register so we can read the PHYID
2208          * later.
2209          */
2210         tg3_writephy(tp, 0x10, 0x8011);
2211 }
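/* Note: the two "XXX schedule_timeout()" loops in tg3_init_bcm8002()
 * busy-wait for 5 ms and 150 ms respectively; in a context that is
 * allowed to sleep they could be replaced by msleep(5) and msleep(150).
 */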
2212
2213 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2214 {
2215         u32 sg_dig_ctrl, sg_dig_status;
2216         u32 serdes_cfg, expected_sg_dig_ctrl;
2217         int workaround, port_a;
2218         int current_link_up;
2219
2220         serdes_cfg = 0;
2221         expected_sg_dig_ctrl = 0;
2222         workaround = 0;
2223         port_a = 1;
2224         current_link_up = 0;
2225
2226         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2227             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2228                 workaround = 1;
2229                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2230                         port_a = 0;
2231
2232                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2233                 /* preserve bits 20-23 for voltage regulator */
2234                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2235         }
2236
2237         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2238
2239         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2240                 if (sg_dig_ctrl & (1 << 31)) {
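                        /* Hardware autoneg is currently enabled
                         * (bit 31 of SG_DIG_CTRL); knock the register
                         * back down to the forced-mode value.
                         */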
2241                         if (workaround) {
2242                                 u32 val = serdes_cfg;
2243
2244                                 if (port_a)
2245                                         val |= 0xc010000;
2246                                 else
2247                                         val |= 0x4010000;
2248                                 tw32_f(MAC_SERDES_CFG, val);
2249                         }
2250                         tw32_f(SG_DIG_CTRL, 0x01388400);
2251                 }
2252                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2253                         tg3_setup_flow_control(tp, 0, 0);
2254                         current_link_up = 1;
2255                 }
2256                 goto out;
2257         }
2258
2259         /* Want auto-negotiation.  */
2260         expected_sg_dig_ctrl = 0x81388400;
2261
2262         /* Pause capability */
2263         expected_sg_dig_ctrl |= (1 << 11);
2264
2265         /* Asymmetric pause */
2266         expected_sg_dig_ctrl |= (1 << 12);
2267
2268         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2269                 if (workaround)
2270                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2271                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2272                 udelay(5);
2273                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2274
2275                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2276         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2277                                  MAC_STATUS_SIGNAL_DET)) {
2278                 int i;
2279
2280                 /* Give time to negotiate (~200ms) */
2281                 for (i = 0; i < 40000; i++) {
2282                         sg_dig_status = tr32(SG_DIG_STATUS);
2283                         if (sg_dig_status & (0x3))
2284                                 break;
2285                         udelay(5);
2286                 }
2287                 mac_status = tr32(MAC_STATUS);
2288
2289                 if ((sg_dig_status & (1 << 1)) &&
2290                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2291                         u32 local_adv, remote_adv;
2292
2293                         local_adv = ADVERTISE_PAUSE_CAP;
2294                         remote_adv = 0;
2295                         if (sg_dig_status & (1 << 19))
2296                                 remote_adv |= LPA_PAUSE_CAP;
2297                         if (sg_dig_status & (1 << 20))
2298                                 remote_adv |= LPA_PAUSE_ASYM;
2299
2300                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2301                         current_link_up = 1;
2302                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2303                 } else if (!(sg_dig_status & (1 << 1))) {
2304                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2305                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2306                         else {
2307                                 if (workaround) {
2308                                         u32 val = serdes_cfg;
2309
2310                                         if (port_a)
2311                                                 val |= 0xc010000;
2312                                         else
2313                                                 val |= 0x4010000;
2314
2315                                         tw32_f(MAC_SERDES_CFG, val);
2316                                 }
2317
2318                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2319                                 udelay(40);
2320
2321                                 /* Link parallel detection: the link is up
2322                                  * only if we have PCS_SYNC and are not
2323                                  * receiving config code words. */
2324                                 mac_status = tr32(MAC_STATUS);
2325                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2326                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2327                                         tg3_setup_flow_control(tp, 0, 0);
2328                                         current_link_up = 1;
2329                                 }
2330                         }
2331                 }
2332         }
2333
2334 out:
2335         return current_link_up;
2336 }
2337
2338 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2339 {
2340         int current_link_up = 0;
2341
2342         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2343                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2344                 goto out;
2345         }
2346
2347         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2348                 u32 flags;
2349                 int i;
2350   
2351                 if (fiber_autoneg(tp, &flags)) {
2352                         u32 local_adv, remote_adv;
2353
2354                         local_adv = ADVERTISE_PAUSE_CAP;
2355                         remote_adv = 0;
2356                         if (flags & MR_LP_ADV_SYM_PAUSE)
2357                                 remote_adv |= LPA_PAUSE_CAP;
2358                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2359                                 remote_adv |= LPA_PAUSE_ASYM;
2360
2361                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2362
2363                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2364                         current_link_up = 1;
2365                 }
2366                 for (i = 0; i < 30; i++) {
2367                         udelay(20);
2368                         tw32_f(MAC_STATUS,
2369                                (MAC_STATUS_SYNC_CHANGED |
2370                                 MAC_STATUS_CFG_CHANGED));
2371                         udelay(40);
2372                         if ((tr32(MAC_STATUS) &
2373                              (MAC_STATUS_SYNC_CHANGED |
2374                               MAC_STATUS_CFG_CHANGED)) == 0)
2375                                 break;
2376                 }
2377
2378                 mac_status = tr32(MAC_STATUS);
2379                 if (current_link_up == 0 &&
2380                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2381                     !(mac_status & MAC_STATUS_RCVD_CFG))
2382                         current_link_up = 1;
2383         } else {
2384                 /* Forcing 1000FD link up. */
2385                 current_link_up = 1;
2386                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2387
2388                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2389                 udelay(40);
2390         }
2391
2392 out:
2393         return current_link_up;
2394 }
2395
2396 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2397 {
2398         u32 orig_pause_cfg;
2399         u16 orig_active_speed;
2400         u8 orig_active_duplex;
2401         u32 mac_status;
2402         int current_link_up;
2403         int i;
2404
2405         orig_pause_cfg =
2406                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2407                                   TG3_FLAG_TX_PAUSE));
2408         orig_active_speed = tp->link_config.active_speed;
2409         orig_active_duplex = tp->link_config.active_duplex;
2410
2411         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2412             netif_carrier_ok(tp->dev) &&
2413             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2414                 mac_status = tr32(MAC_STATUS);
2415                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2416                                MAC_STATUS_SIGNAL_DET |
2417                                MAC_STATUS_CFG_CHANGED |
2418                                MAC_STATUS_RCVD_CFG);
2419                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2420                                    MAC_STATUS_SIGNAL_DET)) {
2421                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2422                                             MAC_STATUS_CFG_CHANGED));
2423                         return 0;
2424                 }
2425         }
2426
2427         tw32_f(MAC_TX_AUTO_NEG, 0);
2428
2429         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2430         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2431         tw32_f(MAC_MODE, tp->mac_mode);
2432         udelay(40);
2433
2434         if (tp->phy_id == PHY_ID_BCM8002)
2435                 tg3_init_bcm8002(tp);
2436
2437         /* Enable link change event even when serdes polling.  */
2438         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2439         udelay(40);
2440
2441         current_link_up = 0;
2442         mac_status = tr32(MAC_STATUS);
2443
2444         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2445                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2446         else
2447                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2448
2449         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2450         tw32_f(MAC_MODE, tp->mac_mode);
2451         udelay(40);
2452
2453         tp->hw_status->status =
2454                 (SD_STATUS_UPDATED |
2455                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2456
2457         for (i = 0; i < 100; i++) {
2458                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2459                                     MAC_STATUS_CFG_CHANGED));
2460                 udelay(5);
2461                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2462                                          MAC_STATUS_CFG_CHANGED)) == 0)
2463                         break;
2464         }
2465
2466         mac_status = tr32(MAC_STATUS);
2467         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2468                 current_link_up = 0;
2469                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2470                         tw32_f(MAC_MODE, (tp->mac_mode |
2471                                           MAC_MODE_SEND_CONFIGS));
2472                         udelay(1);
2473                         tw32_f(MAC_MODE, tp->mac_mode);
2474                 }
2475         }
2476
2477         if (current_link_up == 1) {
2478                 tp->link_config.active_speed = SPEED_1000;
2479                 tp->link_config.active_duplex = DUPLEX_FULL;
2480                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2481                                     LED_CTRL_LNKLED_OVERRIDE |
2482                                     LED_CTRL_1000MBPS_ON));
2483         } else {
2484                 tp->link_config.active_speed = SPEED_INVALID;
2485                 tp->link_config.active_duplex = DUPLEX_INVALID;
2486                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2487                                     LED_CTRL_LNKLED_OVERRIDE |
2488                                     LED_CTRL_TRAFFIC_OVERRIDE));
2489         }
2490
2491         if (current_link_up != netif_carrier_ok(tp->dev)) {
2492                 if (current_link_up)
2493                         netif_carrier_on(tp->dev);
2494                 else
2495                         netif_carrier_off(tp->dev);
2496                 tg3_link_report(tp);
2497         } else {
2498                 u32 now_pause_cfg =
2499                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2500                                          TG3_FLAG_TX_PAUSE);
2501                 if (orig_pause_cfg != now_pause_cfg ||
2502                     orig_active_speed != tp->link_config.active_speed ||
2503                     orig_active_duplex != tp->link_config.active_duplex)
2504                         tg3_link_report(tp);
2505         }
2506
2507         return 0;
2508 }
2509
2510 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2511 {
2512         int err;
2513
2514         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2515                 err = tg3_setup_fiber_phy(tp, force_reset);
2516         } else {
2517                 err = tg3_setup_copper_phy(tp, force_reset);
2518         }
2519
2520         if (tp->link_config.active_speed == SPEED_1000 &&
2521             tp->link_config.active_duplex == DUPLEX_HALF)
2522                 tw32(MAC_TX_LENGTHS,
2523                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2524                       (6 << TX_LENGTHS_IPG_SHIFT) |
2525                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2526         else
2527                 tw32(MAC_TX_LENGTHS,
2528                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2529                       (6 << TX_LENGTHS_IPG_SHIFT) |
2530                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2531
2532         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2533                 if (netif_carrier_ok(tp->dev)) {
2534                         tw32(HOSTCC_STAT_COAL_TICKS,
2535                              tp->coal.stats_block_coalesce_usecs);
2536                 } else {
2537                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2538                 }
2539         }
2540
2541         return err;
2542 }
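/* Half-duplex gigabit needs a much larger slot time than the other modes
 * (carrier extension), which is why the two MAC_TX_LENGTHS values
 * programmed in tg3_setup_phy() differ only in the slot-time field.
 * A hypothetical helper, shown purely as a sketch, would make that
 * explicit:
 */
#if 0
static u32 tg3_tx_lengths_val(int gig_half_duplex)
{
        return (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
               (6 << TX_LENGTHS_IPG_SHIFT) |
               ((gig_half_duplex ? 0xff : 32) << TX_LENGTHS_SLOT_TIME_SHIFT);
}
#endif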
2543
2544 /* Tigon3 never reports partial packet sends.  So we do not
2545  * need special logic to handle SKBs that have not had all
2546  * of their frags sent yet, like SunGEM does.
2547  */
2548 static void tg3_tx(struct tg3 *tp)
2549 {
2550         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2551         u32 sw_idx = tp->tx_cons;
2552
2553         while (sw_idx != hw_idx) {
2554                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2555                 struct sk_buff *skb = ri->skb;
2556                 int i;
2557
2558                 if (unlikely(skb == NULL))
2559                         BUG();
2560
2561                 pci_unmap_single(tp->pdev,
2562                                  pci_unmap_addr(ri, mapping),
2563                                  skb_headlen(skb),
2564                                  PCI_DMA_TODEVICE);
2565
2566                 ri->skb = NULL;
2567
2568                 sw_idx = NEXT_TX(sw_idx);
2569
2570                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571                         if (unlikely(sw_idx == hw_idx))
2572                                 BUG();
2573
2574                         ri = &tp->tx_buffers[sw_idx];
2575                         if (unlikely(ri->skb != NULL))
2576                                 BUG();
2577
2578                         pci_unmap_page(tp->pdev,
2579                                        pci_unmap_addr(ri, mapping),
2580                                        skb_shinfo(skb)->frags[i].size,
2581                                        PCI_DMA_TODEVICE);
2582
2583                         sw_idx = NEXT_TX(sw_idx);
2584                 }
2585
2586                 dev_kfree_skb(skb);
2587         }
2588
2589         tp->tx_cons = sw_idx;
2590
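        /* Only wake the queue once a decent number of descriptors are
         * free again, so we do not ping-pong between stopping and
         * waking on every completion.
         */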
2591         if (netif_queue_stopped(tp->dev) &&
2592             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2593                 netif_wake_queue(tp->dev);
2594 }
2595
2596 /* Returns size of skb allocated or < 0 on error.
2597  *
2598  * We only need to fill in the address because the other members
2599  * of the RX descriptor are invariant, see tg3_init_rings.
2600  *
2601  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2602  * posting buffers we only dirty the first cache line of the RX
2603  * descriptor (containing the address).  Whereas for the RX status
2604  * buffers the cpu only reads the last cacheline of the RX descriptor
2605  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2606  */
2607 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2608                             int src_idx, u32 dest_idx_unmasked)
2609 {
2610         struct tg3_rx_buffer_desc *desc;
2611         struct ring_info *map, *src_map;
2612         struct sk_buff *skb;
2613         dma_addr_t mapping;
2614         int skb_size, dest_idx;
2615
2616         src_map = NULL;
2617         switch (opaque_key) {
2618         case RXD_OPAQUE_RING_STD:
2619                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2620                 desc = &tp->rx_std[dest_idx];
2621                 map = &tp->rx_std_buffers[dest_idx];
2622                 if (src_idx >= 0)
2623                         src_map = &tp->rx_std_buffers[src_idx];
2624                 skb_size = RX_PKT_BUF_SZ;
2625                 break;
2626
2627         case RXD_OPAQUE_RING_JUMBO:
2628                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2629                 desc = &tp->rx_jumbo[dest_idx];
2630                 map = &tp->rx_jumbo_buffers[dest_idx];
2631                 if (src_idx >= 0)
2632                         src_map = &tp->rx_jumbo_buffers[src_idx];
2633                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2634                 break;
2635
2636         default:
2637                 return -EINVAL;
2638         }
2639
2640         /* Do not overwrite any of the map or rp information
2641          * until we are sure we can commit to a new buffer.
2642          *
2643          * Callers depend upon this behavior and assume that
2644          * we leave everything unchanged if we fail.
2645          */
2646         skb = dev_alloc_skb(skb_size);
2647         if (skb == NULL)
2648                 return -ENOMEM;
2649
2650         skb->dev = tp->dev;
2651         skb_reserve(skb, tp->rx_offset);
2652
2653         mapping = pci_map_single(tp->pdev, skb->data,
2654                                  skb_size - tp->rx_offset,
2655                                  PCI_DMA_FROMDEVICE);
2656
2657         map->skb = skb;
2658         pci_unmap_addr_set(map, mapping, mapping);
2659
2660         if (src_map != NULL)
2661                 src_map->skb = NULL;
2662
2663         desc->addr_hi = ((u64)mapping >> 32);
2664         desc->addr_lo = ((u64)mapping & 0xffffffff);
2665
2666         return skb_size;
2667 }
2668
2669 /* We only need to move over in the address because the other
2670  * members of the RX descriptor are invariant.  See notes above
2671  * tg3_alloc_rx_skb for full details.
2672  */
2673 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2674                            int src_idx, u32 dest_idx_unmasked)
2675 {
2676         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2677         struct ring_info *src_map, *dest_map;
2678         int dest_idx;
2679
2680         switch (opaque_key) {
2681         case RXD_OPAQUE_RING_STD:
2682                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2683                 dest_desc = &tp->rx_std[dest_idx];
2684                 dest_map = &tp->rx_std_buffers[dest_idx];
2685                 src_desc = &tp->rx_std[src_idx];
2686                 src_map = &tp->rx_std_buffers[src_idx];
2687                 break;
2688
2689         case RXD_OPAQUE_RING_JUMBO:
2690                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2691                 dest_desc = &tp->rx_jumbo[dest_idx];
2692                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2693                 src_desc = &tp->rx_jumbo[src_idx];
2694                 src_map = &tp->rx_jumbo_buffers[src_idx];
2695                 break;
2696
2697         default:
2698                 return;
2699         }
2700
2701         dest_map->skb = src_map->skb;
2702         pci_unmap_addr_set(dest_map, mapping,
2703                            pci_unmap_addr(src_map, mapping));
2704         dest_desc->addr_hi = src_desc->addr_hi;
2705         dest_desc->addr_lo = src_desc->addr_lo;
2706
2707         src_map->skb = NULL;
2708 }
2709
2710 #if TG3_VLAN_TAG_USED
2711 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2712 {
2713         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2714 }
2715 #endif
2716
2717 /* The RX ring scheme is composed of multiple rings which post fresh
2718  * buffers to the chip, and one special ring the chip uses to report
2719  * status back to the host.
2720  *
2721  * The special ring reports the status of received packets to the
2722  * host.  The chip does not write into the original descriptor the
2723  * RX buffer was obtained from.  The chip simply takes the original
2724  * descriptor as provided by the host, updates the status and length
2725  * field, then writes this into the next status ring entry.
2726  *
2727  * Each ring the host uses to post buffers to the chip is described
2728  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2729  * it is first placed into the on-chip ram.  When the packet's length
2730  * is known, it walks down the TG3_BDINFO entries to select the ring.
2731  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2732  * whose MAXLEN covers the new packet's length is chosen.
2733  *
2734  * The "separate ring for rx status" scheme may sound queer, but it makes
2735  * sense from a cache coherency perspective.  If only the host writes
2736  * to the buffer post rings, and only the chip writes to the rx status
2737  * rings, then cache lines never move beyond shared-modified state.
2738  * If both the host and chip were to write into the same ring, cache line
2739  * eviction could occur since both entities want it in an exclusive state.
2740  */
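/* For example, when jumbo frames are enabled a received frame that is
 * too large for the standard ring's MAXLEN is matched against the jumbo
 * ring's TG3_BDINFO and shows up in the status ring with the
 * RXD_OPAQUE_RING_JUMBO key in its opaque cookie.
 */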
2741 static int tg3_rx(struct tg3 *tp, int budget)
2742 {
2743         u32 work_mask;
2744         u32 sw_idx = tp->rx_rcb_ptr;
2745         u16 hw_idx;
2746         int received;
2747
2748         hw_idx = tp->hw_status->idx[0].rx_producer;
2749         /*
2750          * We need to order the read of hw_idx and the read of
2751          * the opaque cookie.
2752          */
2753         rmb();
2754         work_mask = 0;
2755         received = 0;
2756         while (sw_idx != hw_idx && budget > 0) {
2757                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2758                 unsigned int len;
2759                 struct sk_buff *skb;
2760                 dma_addr_t dma_addr;
2761                 u32 opaque_key, desc_idx, *post_ptr;
2762
2763                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2764                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2765                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2766                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2767                                                   mapping);
2768                         skb = tp->rx_std_buffers[desc_idx].skb;
2769                         post_ptr = &tp->rx_std_ptr;
2770                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2771                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2772                                                   mapping);
2773                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2774                         post_ptr = &tp->rx_jumbo_ptr;
2775                 } else {
2777                         goto next_pkt_nopost;
2778                 }
2779
2780                 work_mask |= opaque_key;
2781
2782                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2783                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2784                 drop_it:
2785                         tg3_recycle_rx(tp, opaque_key,
2786                                        desc_idx, *post_ptr);
2787                 drop_it_no_recycle:
2788                         /* Other statistics are tracked by the card. */
2789                         tp->net_stats.rx_dropped++;
2790                         goto next_pkt;
2791                 }
2792
2793                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2794
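                /* Copybreak: frames larger than RX_COPY_THRESHOLD are
                 * "flipped" out of the ring (the skb goes straight to the
                 * stack and a fresh buffer is allocated in its place),
                 * while smaller frames are copied into a new skb so the
                 * original DMA buffer can simply be recycled.
                 */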
2795                 /* rx_offset != 2 iff this is a 5701 card running
2796                  * in PCI-X mode [see tg3_get_invariants()].
2797                  */
2798                 if (len > RX_COPY_THRESHOLD &&
2799                     tp->rx_offset == 2) {
2800                         int skb_size;
2801
2802                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2803                                                     desc_idx, *post_ptr);
2804                         if (skb_size < 0)
2805                                 goto drop_it;
2806
2807                         pci_unmap_single(tp->pdev, dma_addr,
2808                                          skb_size - tp->rx_offset,
2809                                          PCI_DMA_FROMDEVICE);
2810
2811                         skb_put(skb, len);
2812                 } else {
2813                         struct sk_buff *copy_skb;
2814
2815                         tg3_recycle_rx(tp, opaque_key,
2816                                        desc_idx, *post_ptr);
2817
2818                         copy_skb = dev_alloc_skb(len + 2);
2819                         if (copy_skb == NULL)
2820                                 goto drop_it_no_recycle;
2821
2822                         copy_skb->dev = tp->dev;
2823                         skb_reserve(copy_skb, 2);
2824                         skb_put(copy_skb, len);
2825                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2826                         memcpy(copy_skb->data, skb->data, len);
2827                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2828
2829                         /* We'll reuse the original ring buffer. */
2830                         skb = copy_skb;
2831                 }
2832
2833                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2834                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2835                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2836                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2837                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2838                 else
2839                         skb->ip_summed = CHECKSUM_NONE;
2840
2841                 skb->protocol = eth_type_trans(skb, tp->dev);
2842 #if TG3_VLAN_TAG_USED
2843                 if (tp->vlgrp != NULL &&
2844                     desc->type_flags & RXD_FLAG_VLAN) {
2845                         tg3_vlan_rx(tp, skb,
2846                                     desc->err_vlan & RXD_VLAN_MASK);
2847                 } else
2848 #endif
2849                         netif_receive_skb(skb);
2850
2851                 tp->dev->last_rx = jiffies;
2852                 received++;
2853                 budget--;
2854
2855 next_pkt:
2856                 (*post_ptr)++;
2857 next_pkt_nopost:
2858                 sw_idx++;
2859                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
2860
2861                 /* Refresh hw_idx to see if there is new work */
2862                 if (sw_idx == hw_idx) {
2863                         hw_idx = tp->hw_status->idx[0].rx_producer;
2864                         rmb();
2865                 }
2866         }
2867
2868         /* ACK the status ring. */
2869         tp->rx_rcb_ptr = sw_idx;
2870         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
2871
2872         /* Refill RX ring(s). */
2873         if (work_mask & RXD_OPAQUE_RING_STD) {
2874                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2875                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2876                              sw_idx);
2877         }
2878         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2879                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2880                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2881                              sw_idx);
2882         }
2883         mmiowb();
2884
2885         return received;
2886 }
2887
2888 static int tg3_poll(struct net_device *netdev, int *budget)
2889 {
2890         struct tg3 *tp = netdev_priv(netdev);
2891         struct tg3_hw_status *sblk = tp->hw_status;
2892         int done;
2893
2894         /* handle link change and other phy events */
2895         if (!(tp->tg3_flags &
2896               (TG3_FLAG_USE_LINKCHG_REG |
2897                TG3_FLAG_POLL_SERDES))) {
2898                 if (sblk->status & SD_STATUS_LINK_CHG) {
2899                         sblk->status = SD_STATUS_UPDATED |
2900                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2901                         spin_lock(&tp->lock);
2902                         tg3_setup_phy(tp, 0);
2903                         spin_unlock(&tp->lock);
2904                 }
2905         }
2906
2907         /* run TX completion thread */
2908         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2909                 spin_lock(&tp->tx_lock);
2910                 tg3_tx(tp);
2911                 spin_unlock(&tp->tx_lock);
2912         }
2913
2914         /* run RX thread, within the bounds set by NAPI.
2915          * All RX "locking" is done by ensuring outside
2916          * code synchronizes with dev->poll()
2917          */
2918         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2919                 int orig_budget = *budget;
2920                 int work_done;
2921
2922                 if (orig_budget > netdev->quota)
2923                         orig_budget = netdev->quota;
2924
2925                 work_done = tg3_rx(tp, orig_budget);
2926
2927                 *budget -= work_done;
2928                 netdev->quota -= work_done;
2929         }
2930
2931         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
2932                 tp->last_tag = sblk->status_tag;
2933         rmb();
2934         sblk->status &= ~SD_STATUS_UPDATED;
2935
2936         /* if no more work, tell net stack and NIC we're done */
2937         done = !tg3_has_work(tp);
2938         if (done) {
2939                 spin_lock(&tp->lock);
2940                 netif_rx_complete(netdev);
2941                 tg3_restart_ints(tp);
2942                 spin_unlock(&tp->lock);
2943         }
2944
2945         return (done ? 0 : 1);
2946 }
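/* Illustrative sketch, not part of the driver: the contract of the
 * 2.6.x netdev->poll() interface that tg3_poll() above implements.
 * The names example_poll, do_rx_work and no_more_work are made up for
 * the sketch; only the budget/quota bookkeeping and the return values
 * mirror what the real handler does.
 */
#if 0
static int example_poll(struct net_device *dev, int *budget)
{
        int limit = min(*budget, dev->quota);
        int done = do_rx_work(dev, limit);      /* process at most 'limit' packets */

        *budget -= done;
        dev->quota -= done;

        if (no_more_work(dev)) {
                netif_rx_complete(dev);         /* leave the poll list */
                return 0;                       /* done; driver re-enables chip irqs */
        }
        return 1;                               /* more work, stay on the poll list */
}
#endif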
2947
2948 static void tg3_irq_quiesce(struct tg3 *tp)
2949 {
2950         BUG_ON(tp->irq_sync);
2951
2952         tp->irq_sync = 1;
2953         smp_mb();
2954
2955         synchronize_irq(tp->pdev->irq);
2956 }
2957
2958 static inline int tg3_irq_sync(struct tg3 *tp)
2959 {
2960         return tp->irq_sync;
2961 }
2962
2963 /* Fully shut down all tg3 driver activity elsewhere in the system.
2964  * If irq_sync is non-zero, the IRQ handler is synchronized with as
2965  * well.  Most of the time this is not necessary, except when
2966  * shutting down the device.
2967  */
2968 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
2969 {
2970         if (irq_sync)
2971                 tg3_irq_quiesce(tp);
2972         spin_lock_bh(&tp->lock);
2973         spin_lock(&tp->tx_lock);
2974 }
2975
2976 static inline void tg3_full_unlock(struct tg3 *tp)
2977 {
2978         spin_unlock(&tp->tx_lock);
2979         spin_unlock_bh(&tp->lock);
2980 }
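/* Illustrative usage, not part of the driver: a configuration change
 * typically quiesces the net stack, takes both locks (synchronizing
 * with the IRQ handler), reprograms the chip and then releases
 * everything again:
 *
 *      tg3_netif_stop(tp);
 *      tg3_full_lock(tp, 1);
 *      ... halt and re-initialize the hardware ...
 *      tg3_netif_start(tp);
 *      tg3_full_unlock(tp);
 *
 * This is the pattern followed by tg3_reset_task() and
 * tg3_change_mtu() below.
 */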
2981
2982 /* MSI ISR - No need to check for interrupt sharing and no need to
2983  * flush status block and interrupt mailbox. PCI ordering rules
2984  * guarantee that MSI will arrive after the status block.
2985  */
2986 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
2987 {
2988         struct net_device *dev = dev_id;
2989         struct tg3 *tp = netdev_priv(dev);
2990         struct tg3_hw_status *sblk = tp->hw_status;
2991
2992         /*
2993          * Writing any value to intr-mbox-0 clears PCI INTA# and
2994          * chip-internal interrupt pending events.
2995          * Writing non-zero to intr-mbox-0 additionally tells the
2996          * NIC to stop sending us irqs, engaging "in-intr-handler"
2997          * event coalescing.
2998          */
2999         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3000         tp->last_tag = sblk->status_tag;
3001         rmb();
3002         if (tg3_irq_sync(tp))
3003                 goto out;
3004         sblk->status &= ~SD_STATUS_UPDATED;
3005         if (likely(tg3_has_work(tp)))
3006                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3007         else {
3008                 /* No work; re-enable interrupts (writing last_tag acks all updates up to that tag). */
3009                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3010                              tp->last_tag << 24);
3011         }
3012 out:
3013         return IRQ_RETVAL(1);
3014 }
3015
3016 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3017 {
3018         struct net_device *dev = dev_id;
3019         struct tg3 *tp = netdev_priv(dev);
3020         struct tg3_hw_status *sblk = tp->hw_status;
3021         unsigned int handled = 1;
3022
3023         /* In INTx mode, it is possible for the interrupt to arrive at
3024          * the CPU before the status block write that preceded it is visible.
3025          * Reading the PCI State register will confirm whether the
3026          * interrupt is ours and will flush the status block.
3027          */
3028         if ((sblk->status & SD_STATUS_UPDATED) ||
3029             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3030                 /*
3031                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3032                  * chip-internal interrupt pending events.
3033                  * Writing non-zero to intr-mbox-0 additionally tells the
3034                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3035                  * event coalescing.
3036                  */
3037                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3038                              0x00000001);
3039                 if (tg3_irq_sync(tp))
3040                         goto out;
3041                 sblk->status &= ~SD_STATUS_UPDATED;
3042                 if (likely(tg3_has_work(tp)))
3043                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3044                 else {
3045                         /* No work, shared interrupt perhaps?  re-enable
3046                          * interrupts, and flush that PCI write
3047                          */
3048                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3049                                 0x00000000);
3050                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3051                 }
3052         } else {        /* shared interrupt */
3053                 handled = 0;
3054         }
3055 out:
3056         return IRQ_RETVAL(handled);
3057 }
3058
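/* Same as tg3_interrupt() above, but for chips run in tagged-status
 * mode: the handler records the status tag before clearing
 * SD_STATUS_UPDATED, and re-enables interrupts by writing that tag
 * back (shifted into the mailbox's high byte) instead of writing zero,
 * so the chip only re-interrupts for status updates newer than the tag.
 */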
3059 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3060 {
3061         struct net_device *dev = dev_id;
3062         struct tg3 *tp = netdev_priv(dev);
3063         struct tg3_hw_status *sblk = tp->hw_status;
3064         unsigned int handled = 1;
3065
3066         /* In INTx mode, it is possible for the interrupt to arrive at
3067          * the CPU before the status block write that preceded it is visible.
3068          * Reading the PCI State register will confirm whether the
3069          * interrupt is ours and will flush the status block.
3070          */
3071         if ((sblk->status & SD_STATUS_UPDATED) ||
3072             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3073                 /*
3074                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3075                  * chip-internal interrupt pending events.
3076                  * Writing non-zero to intr-mbox-0 additionally tells the
3077                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3078                  * event coalescing.
3079                  */
3080                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3081                              0x00000001);
3082                 tp->last_tag = sblk->status_tag;
3083                 rmb();
3084                 if (tg3_irq_sync(tp))
3085                         goto out;
3086                 sblk->status &= ~SD_STATUS_UPDATED;
3087                 if (likely(tg3_has_work(tp)))
3088                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3089                 else {
3090                         /* no work, shared interrupt perhaps?  re-enable
3091                          * interrupts, and flush that PCI write
3092                          */
3093                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3094                                      tp->last_tag << 24);
3095                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3096                 }
3097         } else {        /* shared interrupt */
3098                 handled = 0;
3099         }
3100 out:
3101         return IRQ_RETVAL(handled);
3102 }
3103
3104 /* ISR for interrupt test */
3105 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3106                 struct pt_regs *regs)
3107 {
3108         struct net_device *dev = dev_id;
3109         struct tg3 *tp = netdev_priv(dev);
3110         struct tg3_hw_status *sblk = tp->hw_status;
3111
3112         if (sblk->status & SD_STATUS_UPDATED) {
3113                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3114                              0x00000001);
3115                 return IRQ_RETVAL(1);
3116         }
3117         return IRQ_RETVAL(0);
3118 }
3119
3120 static int tg3_init_hw(struct tg3 *);
3121 static int tg3_halt(struct tg3 *, int, int);
3122
3123 #ifdef CONFIG_NET_POLL_CONTROLLER
3124 static void tg3_poll_controller(struct net_device *dev)
3125 {
3126         struct tg3 *tp = netdev_priv(dev);
3127
3128         tg3_interrupt(tp->pdev->irq, dev, NULL);
3129 }
3130 #endif
3131
3132 static void tg3_reset_task(void *_data)
3133 {
3134         struct tg3 *tp = _data;
3135         unsigned int restart_timer;
3136
3137         tg3_netif_stop(tp);
3138
3139         tg3_full_lock(tp, 1);
3140
3141         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3142         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3143
3144         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3145         tg3_init_hw(tp);
3146
3147         tg3_netif_start(tp);
3148
3149         tg3_full_unlock(tp);
3150
3151         if (restart_timer)
3152                 mod_timer(&tp->timer, jiffies + 1);
3153 }
3154
3155 static void tg3_tx_timeout(struct net_device *dev)
3156 {
3157         struct tg3 *tp = netdev_priv(dev);
3158
3159         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3160                dev->name);
3161
3162         schedule_work(&tp->reset_task);
3163 }
3164
3165 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3166
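/* Work around a DMA hardware bug: some tg3 chips cannot DMA a transmit
 * buffer that crosses a 4GB boundary.  When tg3_start_xmit() detects
 * such a mapping (see tg3_4g_overflow_test() below), it calls this
 * helper, which copies the skb into a freshly allocated linear skb,
 * maps that instead, emits a single descriptor for it at *start, and
 * unmaps/cleans up the software ring entries that had already been set
 * up for the original skb.
 */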
3167 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3168                                        u32 guilty_entry, int guilty_len,
3169                                        u32 last_plus_one, u32 *start, u32 mss)
3170 {
3171         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3172         dma_addr_t new_addr;
3173         u32 entry = *start;
3174         int i;
3175
3176         if (!new_skb) {
3177                 dev_kfree_skb(skb);
3178                 return -1;
3179         }
3180
3181         /* New SKB is guaranteed to be linear. */
3182         entry = *start;
3183         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3184                                   PCI_DMA_TODEVICE);
3185         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3186                     (skb->ip_summed == CHECKSUM_HW) ?
3187                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3188         *start = NEXT_TX(entry);
3189
3190         /* Now clean up the sw ring entries. */
3191         i = 0;
3192         while (entry != last_plus_one) {
3193                 int len;
3194
3195                 if (i == 0)
3196                         len = skb_headlen(skb);
3197                 else
3198                         len = skb_shinfo(skb)->frags[i-1].size;
3199                 pci_unmap_single(tp->pdev,
3200                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3201                                  len, PCI_DMA_TODEVICE);
3202                 if (i == 0) {
3203                         tp->tx_buffers[entry].skb = new_skb;
3204                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3205                 } else {
3206                         tp->tx_buffers[entry].skb = NULL;
3207                 }
3208                 entry = NEXT_TX(entry);
3209                 i++;
3210         }
3211
3212         dev_kfree_skb(skb);
3213
3214         return 0;
3215 }
3216
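/* Fill in one transmit descriptor.  'mss_and_is_end' packs two values:
 * bit 0 marks the final descriptor of the frame, and the remaining
 * bits carry the TSO MSS (callers pass "(is_last) | (mss << 1)").
 * When TXD_FLAG_VLAN is set, the VLAN tag rides in the upper 16 bits
 * of 'flags'.
 */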
3217 static void tg3_set_txd(struct tg3 *tp, int entry,
3218                         dma_addr_t mapping, int len, u32 flags,
3219                         u32 mss_and_is_end)
3220 {
3221         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3222         int is_end = (mss_and_is_end & 0x1);
3223         u32 mss = (mss_and_is_end >> 1);
3224         u32 vlan_tag = 0;
3225
3226         if (is_end)
3227                 flags |= TXD_FLAG_END;
3228         if (flags & TXD_FLAG_VLAN) {
3229                 vlan_tag = flags >> 16;
3230                 flags &= 0xffff;
3231         }
3232         vlan_tag |= (mss << TXD_MSS_SHIFT);
3233
3234         txd->addr_hi = ((u64) mapping >> 32);
3235         txd->addr_lo = ((u64) mapping & 0xffffffff);
3236         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3237         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3238 }
3239
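/* Return true if a DMA mapping of 'len' bytes at 'mapping' would cross
 * a 4GB boundary, i.e. if the low 32 bits of the address wrap within
 * the transfer (plus an 8-byte guard).  The first comparison is just a
 * cheap pre-filter: only addresses in the last few KB below a boundary
 * can wrap for frame-sized buffers.  Offenders are rerouted through
 * tigon3_4gb_hwbug_workaround() above.
 */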
3240 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3241 {
3242         u32 base = (u32) mapping & 0xffffffff;
3243
3244         return ((base > 0xffffdcc0) &&
3245                 (base + len + 8 < base));
3246 }
3247
3248 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3249 {
3250         struct tg3 *tp = netdev_priv(dev);
3251         dma_addr_t mapping;
3252         unsigned int i;
3253         u32 len, entry, base_flags, mss;
3254         int would_hit_hwbug;
3255
3256         len = skb_headlen(skb);
3257
3258         /* No BH disabling for tx_lock here.  We are running in BH disabled
3259          * context and TX reclaim runs via tp->poll inside of a software
3260          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3261          * no IRQ context deadlocks to worry about either.  Rejoice!
3262          */
3263         if (!spin_trylock(&tp->tx_lock))
3264                 return NETDEV_TX_LOCKED; 
3265
3266         /* This is a hard error, log it. */
3267         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3268                 netif_stop_queue(dev);
3269                 spin_unlock(&tp->tx_lock);
3270                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3271                        dev->name);
3272                 return NETDEV_TX_BUSY;
3273         }
3274
3275         entry = tp->tx_prod;
3276         base_flags = 0;
3277         if (skb->ip_summed == CHECKSUM_HW)
3278                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3279 #if TG3_TSO_SUPPORT != 0
3280         mss = 0;
3281         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3282             (mss = skb_shinfo(skb)->tso_size) != 0) {
3283                 int tcp_opt_len, ip_tcp_len;
3284
3285                 if (skb_header_cloned(skb) &&
3286                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3287                         dev_kfree_skb(skb);
3288                         goto out_unlock;
3289                 }
3290
3291                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3292                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3293
3294                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3295                                TXD_FLAG_CPU_POST_DMA);
3296
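                /* Prepare the headers for segmentation: clear the IP
                 * checksum, set tot_len to the per-segment length, and,
                 * unless the chip does full TSO in hardware, seed the TCP
                 * checksum with the pseudo-header sum (no length) so the
                 * firmware can finish it for each segment.
                 */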
3297                 skb->nh.iph->check = 0;
3298                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3299                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3300                         skb->h.th->check = 0;
3301                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3302                 } else {
3304                         skb->h.th->check =
3305                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3306                                                    skb->nh.iph->daddr,
3307                                                    0, IPPROTO_TCP, 0);
3308                 }
3309
3310                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3311                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3312                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3313                                 int tsflags;
3314
3315                                 tsflags = ((skb->nh.iph->ihl - 5) +
3316                                            (tcp_opt_len >> 2));
3317                                 mss |= (tsflags << 11);
3318                         }
3319                 } else {
3320                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3321                                 int tsflags;
3322
3323                                 tsflags = ((skb->nh.iph->ihl - 5) +
3324                                            (tcp_opt_len >> 2));
3325                                 base_flags |= tsflags << 12;
3326                         }
3327                 }
3328         }
3329 #else
3330         mss = 0;
3331 #endif
3332 #if TG3_VLAN_TAG_USED
3333         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3334                 base_flags |= (TXD_FLAG_VLAN |
3335                                (vlan_tx_tag_get(skb) << 16));
3336 #endif
3337
3338         /* Queue skb data, a.k.a. the main skb fragment. */
3339         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3340
3341         tp->tx_buffers[entry].skb = skb;
3342         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3343
3344         would_hit_hwbug = 0;
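        /* Stored as (ring entry + 1) so that zero can mean "no hit"; it
         * is decremented back to the real entry before the unwind below.
         */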
3345
3346         if (tg3_4g_overflow_test(mapping, len))
3347                 would_hit_hwbug = entry + 1;
3348
3349         tg3_set_txd(tp, entry, mapping, len, base_flags,
3350                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3351
3352         entry = NEXT_TX(entry);
3353
3354         /* Now loop through additional data fragments, and queue them. */
3355         if (skb_shinfo(skb)->nr_frags > 0) {
3356                 unsigned int i, last;
3357
3358                 last = skb_shinfo(skb)->nr_frags - 1;
3359                 for (i = 0; i <= last; i++) {
3360                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3361
3362                         len = frag->size;
3363                         mapping = pci_map_page(tp->pdev,
3364                                                frag->page,
3365                                                frag->page_offset,
3366                                                len, PCI_DMA_TODEVICE);
3367
3368                         tp->tx_buffers[entry].skb = NULL;
3369                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3370
3371                         if (tg3_4g_overflow_test(mapping, len)) {
3372                                 /* Only one should match. */
3373                                 if (would_hit_hwbug)
3374                                         BUG();
3375                                 would_hit_hwbug = entry + 1;
3376                         }
3377
3378                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3379                                 tg3_set_txd(tp, entry, mapping, len,
3380                                             base_flags, (i == last)|(mss << 1));
3381                         else
3382                                 tg3_set_txd(tp, entry, mapping, len,
3383                                             base_flags, (i == last));
3384
3385                         entry = NEXT_TX(entry);
3386                 }
3387         }
3388
3389         if (would_hit_hwbug) {
3390                 u32 last_plus_one = entry;
3391                 u32 start;
3392                 unsigned int len = 0;
3393
3394                 would_hit_hwbug -= 1;
3395                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3396                 entry &= (TG3_TX_RING_SIZE - 1);
3397                 start = entry;
3398                 i = 0;
3399                 while (entry != last_plus_one) {
3400                         if (i == 0)
3401                                 len = skb_headlen(skb);
3402                         else
3403                                 len = skb_shinfo(skb)->frags[i-1].size;
3404
3405                         if (entry == would_hit_hwbug)
3406                                 break;
3407
3408                         i++;
3409                         entry = NEXT_TX(entry);
3410
3411                 }
3412
3413                 /* If the workaround fails due to memory/mapping
3414                  * failure, silently drop this packet.
3415                  */
3416                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3417                                                 entry, len,
3418                                                 last_plus_one,
3419                                                 &start, mss))
3420                         goto out_unlock;
3421
3422                 entry = start;
3423         }
3424
3425         /* Packets are ready, update Tx producer idx local and on card. */
3426         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3427
3428         tp->tx_prod = entry;
3429         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3430                 netif_stop_queue(dev);
3431
3432 out_unlock:
3433         mmiowb();
3434         spin_unlock(&tp->tx_lock);
3435
3436         dev->trans_start = jiffies;
3437
3438         return NETDEV_TX_OK;
3439 }
3440
3441 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3442                                int new_mtu)
3443 {
3444         dev->mtu = new_mtu;
3445
3446         if (new_mtu > ETH_DATA_LEN)
3447                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3448         else
3449                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3450 }
3451
3452 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3453 {
3454         struct tg3 *tp = netdev_priv(dev);
3455
3456         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3457                 return -EINVAL;
3458
3459         if (!netif_running(dev)) {
3460                 /* We'll just catch it later when the
3461                  * device is brought up.
3462                  */
3463                 tg3_set_mtu(dev, tp, new_mtu);
3464                 return 0;
3465         }
3466
3467         tg3_netif_stop(tp);
3468
3469         tg3_full_lock(tp, 1);
3470
3471         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3472
3473         tg3_set_mtu(dev, tp, new_mtu);
3474
3475         tg3_init_hw(tp);
3476
3477         tg3_netif_start(tp);
3478
3479         tg3_full_unlock(tp);
3480
3481         return 0;
3482 }
3483
3484 /* Free up pending packets in all rx/tx rings.
3485  *
3486  * The chip has been shut down and the driver detached from
3487  * the networking core, so no interrupts or new tx packets will
3488  * end up in the driver.  tp->{tx,}lock is not held and we are not
3489  * in an interrupt context and thus may sleep.
3490  */
3491 static void tg3_free_rings(struct tg3 *tp)
3492 {
3493         struct ring_info *rxp;
3494         int i;
3495
3496         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3497                 rxp = &tp->rx_std_buffers[i];
3498
3499                 if (rxp->skb == NULL)
3500                         continue;
3501                 pci_unmap_single(tp->pdev,
3502                                  pci_unmap_addr(rxp, mapping),
3503                                  RX_PKT_BUF_SZ - tp->rx_offset,
3504                                  PCI_DMA_FROMDEVICE);
3505                 dev_kfree_skb_any(rxp->skb);
3506                 rxp->skb = NULL;
3507         }
3508
3509         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3510                 rxp = &tp->rx_jumbo_buffers[i];
3511
3512                 if (rxp->skb == NULL)
3513                         continue;
3514                 pci_unmap_single(tp->pdev,
3515                                  pci_unmap_addr(rxp, mapping),
3516                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3517                                  PCI_DMA_FROMDEVICE);
3518                 dev_kfree_skb_any(rxp->skb);
3519                 rxp->skb = NULL;
3520         }
3521
3522         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3523                 struct tx_ring_info *txp;
3524                 struct sk_buff *skb;
3525                 int j;
3526
3527                 txp = &tp->tx_buffers[i];
3528                 skb = txp->skb;
3529
3530                 if (skb == NULL) {
3531                         i++;
3532                         continue;
3533                 }
3534
3535                 pci_unmap_single(tp->pdev,
3536                                  pci_unmap_addr(txp, mapping),
3537                                  skb_headlen(skb),
3538                                  PCI_DMA_TODEVICE);
3539                 txp->skb = NULL;
3540
3541                 i++;
3542
3543                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3544                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3545                         pci_unmap_page(tp->pdev,
3546                                        pci_unmap_addr(txp, mapping),
3547                                        skb_shinfo(skb)->frags[j].size,
3548                                        PCI_DMA_TODEVICE);
3549                         i++;
3550                 }
3551
3552                 dev_kfree_skb_any(skb);
3553         }
3554 }
3555
3556 /* Initialize tx/rx rings for packet processing.
3557  *
3558  * The chip has been shut down and the driver detached from
3559  * the networking core, so no interrupts or new tx packets will
3560  * end up in the driver.  tp->{tx,}lock are held and thus
3561  * we may not sleep.
3562  */
3563 static void tg3_init_rings(struct tg3 *tp)
3564 {
3565         u32 i;
3566
3567         /* Free up all the SKBs. */
3568         tg3_free_rings(tp);
3569
3570         /* Zero out all descriptors. */
3571         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3572         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3573         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3574         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3575
3576         /* Initialize invariants of the rings, we only set this
3577          * stuff once.  This works because the card does not
3578          * write into the rx buffer posting rings.
3579          */
3580         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3581                 struct tg3_rx_buffer_desc *rxd;
3582
3583                 rxd = &tp->rx_std[i];
3584                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3585                         << RXD_LEN_SHIFT;
3586                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3587                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3588                                (i << RXD_OPAQUE_INDEX_SHIFT));
3589         }
3590
3591         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3592                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3593                         struct tg3_rx_buffer_desc *rxd;
3594
3595                         rxd = &tp->rx_jumbo[i];
3596                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3597                                 << RXD_LEN_SHIFT;
3598                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3599                                 RXD_FLAG_JUMBO;
3600                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3601                                (i << RXD_OPAQUE_INDEX_SHIFT));
3602                 }
3603         }
3604
3605         /* Now allocate fresh SKBs for each rx ring. */
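        /* Only tp->rx_pending (and tp->rx_jumbo_pending) of the ring
         * entries are populated up front; tg3_rx() re-posts buffers one
         * at a time as packets are consumed, so the number of live
         * buffers stays at the "pending" level rather than the full
         * ring size.
         */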
3606         for (i = 0; i < tp->rx_pending; i++) {
3607                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3608                                      -1, i) < 0)
3609                         break;
3610         }
3611
3612         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3613                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3614                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3615                                              -1, i) < 0)
3616                                 break;
3617                 }
3618         }
3619 }
3620
3621 /*
3622  * Must not be invoked with interrupt sources disabled and
3623  * the hardware shut down.
3624  */
3625 static void tg3_free_consistent(struct tg3 *tp)
3626 {
3627         if (tp->rx_std_buffers) {
3628                 kfree(tp->rx_std_buffers);
3629                 tp->rx_std_buffers = NULL;
3630         }
3631         if (tp->rx_std) {
3632                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3633                                     tp->rx_std, tp->rx_std_mapping);
3634                 tp->rx_std = NULL;
3635         }
3636         if (tp->rx_jumbo) {
3637                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3638                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3639                 tp->rx_jumbo = NULL;
3640         }
3641         if (tp->rx_rcb) {
3642                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3643                                     tp->rx_rcb, tp->rx_rcb_mapping);
3644                 tp->rx_rcb = NULL;
3645         }
3646         if (tp->tx_ring) {
3647                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3648                         tp->tx_ring, tp->tx_desc_mapping);
3649                 tp->tx_ring = NULL;
3650         }
3651         if (tp->hw_status) {
3652                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3653                                     tp->hw_status, tp->status_mapping);
3654                 tp->hw_status = NULL;
3655         }
3656         if (tp->hw_stats) {
3657                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3658                                     tp->hw_stats, tp->stats_mapping);
3659                 tp->hw_stats = NULL;
3660         }
3661 }
3662
3663 /*
3664  * Must not be invoked with interrupt sources disabled and
3665  * the hardware shut down.  Can sleep.
3666  */
3667 static int tg3_alloc_consistent(struct tg3 *tp)
3668 {
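        /* A single kmalloc() backs all three software ring-info arrays,
         * laid out back to back:
         *
         *   rx_std_buffers  [TG3_RX_RING_SIZE       * struct ring_info]
         *   rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE * struct ring_info]
         *   tx_buffers      [TG3_TX_RING_SIZE       * struct tx_ring_info]
         *
         * The jumbo and tx pointers below are simply carved out of this
         * allocation.
         */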
3669         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3670                                       (TG3_RX_RING_SIZE +
3671                                        TG3_RX_JUMBO_RING_SIZE)) +
3672                                      (sizeof(struct tx_ring_info) *
3673                                       TG3_TX_RING_SIZE),
3674                                      GFP_KERNEL);
3675         if (!tp->rx_std_buffers)
3676                 return -ENOMEM;
3677
3678         memset(tp->rx_std_buffers, 0,
3679                (sizeof(struct ring_info) *
3680                 (TG3_RX_RING_SIZE +
3681                  TG3_RX_JUMBO_RING_SIZE)) +
3682                (sizeof(struct tx_ring_info) *
3683                 TG3_TX_RING_SIZE));
3684
3685         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3686         tp->tx_buffers = (struct tx_ring_info *)
3687                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3688
3689         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3690                                           &tp->rx_std_mapping);
3691         if (!tp->rx_std)
3692                 goto err_out;
3693
3694         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3695                                             &tp->rx_jumbo_mapping);
3696
3697         if (!tp->rx_jumbo)
3698                 goto err_out;
3699
3700         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3701                                           &tp->rx_rcb_mapping);
3702         if (!tp->rx_rcb)
3703                 goto err_out;
3704
3705         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3706                                            &tp->tx_desc_mapping);
3707         if (!tp->tx_ring)
3708                 goto err_out;
3709
3710         tp->hw_status = pci_alloc_consistent(tp->pdev,
3711                                              TG3_HW_STATUS_SIZE,
3712                                              &tp->status_mapping);
3713         if (!tp->hw_status)
3714                 goto err_out;
3715
3716         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3717                                             sizeof(struct tg3_hw_stats),
3718                                             &tp->stats_mapping);
3719         if (!tp->hw_stats)
3720                 goto err_out;
3721
3722         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3723         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3724
3725         return 0;
3726
3727 err_out:
3728         tg3_free_consistent(tp);
3729         return -ENOMEM;
3730 }
3731
3732 #define MAX_WAIT_CNT 1000
3733
3734 /* To stop a block, clear the enable bit and poll till it
3735  * clears.  tp->lock is held.
3736  */
3737 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3738 {
3739         unsigned int i;
3740         u32 val;
3741
3742         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3743                 switch (ofs) {
3744                 case RCVLSC_MODE:
3745                 case DMAC_MODE:
3746                 case MBFREE_MODE:
3747                 case BUFMGR_MODE:
3748                 case MEMARB_MODE:
3749                         /* We can't enable/disable these bits on the
3750                          * 5705/5750, so just report success.
3751                          */
3752                         return 0;
3753
3754                 default:
3755                         break;
3756                 }
3757         }
3758
3759         val = tr32(ofs);
3760         val &= ~enable_bit;
3761         tw32_f(ofs, val);
3762
3763         for (i = 0; i < MAX_WAIT_CNT; i++) {
3764                 udelay(100);
3765                 val = tr32(ofs);
3766                 if ((val & enable_bit) == 0)
3767                         break;
3768         }
3769
3770         if (i == MAX_WAIT_CNT && !silent) {
3771                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3772                        "ofs=%lx enable_bit=%x\n",
3773                        ofs, enable_bit);
3774                 return -ENODEV;
3775         }
3776
3777         return 0;
3778 }
3779
3780 /* tp->lock is held. */
3781 static int tg3_abort_hw(struct tg3 *tp, int silent)
3782 {
3783         int i, err;
3784
3785         tg3_disable_ints(tp);
3786
3787         tp->rx_mode &= ~RX_MODE_ENABLE;
3788         tw32_f(MAC_RX_MODE, tp->rx_mode);
3789         udelay(10);
3790
3791         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
3792         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
3793         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
3794         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
3795         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
3796         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
3797
3798         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
3799         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
3800         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
3801         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
3802         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
3803         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
3804         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
3805
3806         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3807         tw32_f(MAC_MODE, tp->mac_mode);
3808         udelay(40);
3809
3810         tp->tx_mode &= ~TX_MODE_ENABLE;
3811         tw32_f(MAC_TX_MODE, tp->tx_mode);
3812
3813         for (i = 0; i < MAX_WAIT_CNT; i++) {
3814                 udelay(100);
3815                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3816                         break;
3817         }
3818         if (i >= MAX_WAIT_CNT) {
3819                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3820                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3821                        tp->dev->name, tr32(MAC_TX_MODE));
3822                 err |= -ENODEV;
3823         }
3824
3825         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
3826         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
3827         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
3828
3829         tw32(FTQ_RESET, 0xffffffff);
3830         tw32(FTQ_RESET, 0x00000000);
3831
3832         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
3833         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
3834
3835         if (tp->hw_status)
3836                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3837         if (tp->hw_stats)
3838                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3839
3840         return err;
3841 }
3842
3843 /* tp->lock is held. */
3844 static int tg3_nvram_lock(struct tg3 *tp)
3845 {
3846         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3847                 int i;
3848
3849                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3850                 for (i = 0; i < 8000; i++) {
3851                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3852                                 break;
3853                         udelay(20);
3854                 }
3855                 if (i == 8000)
3856                         return -ENODEV;
3857         }
3858         return 0;
3859 }
3860
3861 /* tp->lock is held. */
3862 static void tg3_nvram_unlock(struct tg3 *tp)
3863 {
3864         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3865                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3866 }
3867
3868 /* tp->lock is held. */
3869 static void tg3_enable_nvram_access(struct tg3 *tp)
3870 {
3871         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3872             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3873                 u32 nvaccess = tr32(NVRAM_ACCESS);
3874
3875                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3876         }
3877 }
3878
3879 /* tp->lock is held. */
3880 static void tg3_disable_nvram_access(struct tg3 *tp)
3881 {
3882         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
3883             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
3884                 u32 nvaccess = tr32(NVRAM_ACCESS);
3885
3886                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3887         }
3888 }
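
/* Illustrative usage, not part of the driver: NVRAM accesses elsewhere
 * in this file bracket the actual read or program operation with the
 * software arbitration lock and the access-enable bit, roughly:
 *
 *      if (tg3_nvram_lock(tp) == 0) {
 *              tg3_enable_nvram_access(tp);
 *              ... issue NVRAM commands ...
 *              tg3_disable_nvram_access(tp);
 *              tg3_nvram_unlock(tp);
 *      }
 */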
3889
3890 /* tp->lock is held. */
3891 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3892 {
3893         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3894                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3895                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3896
3897         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3898                 switch (kind) {
3899                 case RESET_KIND_INIT:
3900                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3901                                       DRV_STATE_START);
3902                         break;
3903
3904                 case RESET_KIND_SHUTDOWN:
3905                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3906                                       DRV_STATE_UNLOAD);
3907                         break;
3908
3909                 case RESET_KIND_SUSPEND:
3910                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3911                                       DRV_STATE_SUSPEND);
3912                         break;
3913
3914                 default:
3915                         break;
3916                 }
3917         }
3918 }
3919
3920 /* tp->lock is held. */
3921 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3922 {
3923         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3924                 switch (kind) {
3925                 case RESET_KIND_INIT:
3926                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3927                                       DRV_STATE_START_DONE);
3928                         break;
3929
3930                 case RESET_KIND_SHUTDOWN:
3931                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3932                                       DRV_STATE_UNLOAD_DONE);
3933                         break;
3934
3935                 default:
3936                         break;
3937                 }
3938         }
3939 }
3940
3941 /* tp->lock is held. */
3942 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3943 {
3944         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3945                 switch (kind) {
3946                 case RESET_KIND_INIT:
3947                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3948                                       DRV_STATE_START);
3949                         break;
3950
3951                 case RESET_KIND_SHUTDOWN:
3952                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3953                                       DRV_STATE_UNLOAD);
3954                         break;
3955
3956                 case RESET_KIND_SUSPEND:
3957                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3958                                       DRV_STATE_SUSPEND);
3959                         break;
3960
3961                 default:
3962                         break;
3963                 }
3964         }
3965 }
3966
3967 static void tg3_stop_fw(struct tg3 *);
3968
3969 /* tp->lock is held. */
3970 static int tg3_chip_reset(struct tg3 *tp)
3971 {
3972         u32 val;
3973         u32 flags_save;
3974         int i;
3975
3976         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3977                 tg3_nvram_lock(tp);
3978
3979         /*
3980          * We must avoid the readl() that normally takes place.
3981          * It locks up machines, causes machine checks, and other
3982          * fun things.  So, temporarily disable the 5701
3983          * hardware workaround while we do the reset.
3984          */
3985         flags_save = tp->tg3_flags;
3986         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3987
3988         /* do the reset */
3989         val = GRC_MISC_CFG_CORECLK_RESET;
3990
3991         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3992                 if (tr32(0x7e2c) == 0x60) {
3993                         tw32(0x7e2c, 0x20);
3994                 }
3995                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3996                         tw32(GRC_MISC_CFG, (1 << 29));
3997                         val |= (1 << 29);
3998                 }
3999         }
4000
4001         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4002                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4003         tw32(GRC_MISC_CFG, val);
4004
4005         /* restore 5701 hardware bug workaround flag */
4006         tp->tg3_flags = flags_save;
4007
4008         /* Unfortunately, we have to delay before the PCI read back.
4009          * Some 575X chips will not even respond to a PCI cfg access
4010          * when the reset command is given to the chip.
4011          *
4012          * How do these hardware designers expect things to work
4013          * properly if the PCI write is posted for a long period
4014          * of time?  It is always necessary to have some method by
4015          * which a register read back can occur to push out the
4016          * write that does the reset.
4017          *
4018          * For most tg3 variants the trick below was working.
4019          * Ho hum...
4020          */
4021         udelay(120);
4022
4023         /* Flush PCI posted writes.  The normal MMIO registers
4024          * are inaccessible at this time so this is the only
4025          * way to do this reliably (actually, this is no longer
4026          * the case, see above).  I tried to use indirect
4027          * register read/write but this upset some 5701 variants.
4028          */
4029         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4030
4031         udelay(120);
4032
4033         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4034                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4035                         int i;
4036                         u32 cfg_val;
4037
4038                         /* Wait for link training to complete.  */
4039                         for (i = 0; i < 5000; i++)
4040                                 udelay(100);
4041
4042                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4043                         pci_write_config_dword(tp->pdev, 0xc4,
4044                                                cfg_val | (1 << 15));
4045                 }
4046                 /* Set PCIE max payload size and clear error status.  */
4047                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4048         }
4049
4050         /* Re-enable indirect register accesses. */
4051         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4052                                tp->misc_host_ctrl);
4053
4054         /* Set MAX PCI retry to zero. */
4055         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4056         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4057             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4058                 val |= PCISTATE_RETRY_SAME_DMA;
4059         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4060
4061         pci_restore_state(tp->pdev);
4062
4063         /* Make sure PCI-X relaxed ordering bit is clear. */
4064         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4065         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4066         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4067
4068         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4069                 u32 val;
4070
4071                 /* Chip reset on 5780 will reset MSI enable bit,
4072                  * so need to restore it.
4073                  */
4074                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4075                         u16 ctrl;
4076
4077                         pci_read_config_word(tp->pdev,
4078                                              tp->msi_cap + PCI_MSI_FLAGS,
4079                                              &ctrl);
4080                         pci_write_config_word(tp->pdev,
4081                                               tp->msi_cap + PCI_MSI_FLAGS,
4082                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4083                         val = tr32(MSGINT_MODE);
4084                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4085                 }
4086
4087                 val = tr32(MEMARB_MODE);
4088                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4089
4090         } else
4091                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4092
4093         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4094                 tg3_stop_fw(tp);
4095                 tw32(0x5000, 0x400);
4096         }
4097
4098         tw32(GRC_MODE, tp->grc_mode);
4099
4100         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4101                 u32 val = tr32(0xc4);
4102
4103                 tw32(0xc4, val | (1 << 15));
4104         }
4105
4106         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4107             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4108                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4109                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4110                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4111                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4112         }
4113
4114         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4115                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4116                 tw32_f(MAC_MODE, tp->mac_mode);
4117         } else
4118                 tw32_f(MAC_MODE, 0);
4119         udelay(40);
4120
4121         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4122                 /* Wait for firmware initialization to complete. */
4123                 for (i = 0; i < 100000; i++) {
4124                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4125                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4126                                 break;
4127                         udelay(10);
4128                 }
4129                 if (i >= 100000) {
4130                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4131                                "firmware will not restart magic=%08x\n",
4132                                tp->dev->name, val);
4133                         return -ENODEV;
4134                 }
4135         }
4136
4137         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4138             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4139                 u32 val = tr32(0x7c00);
4140
4141                 tw32(0x7c00, val | (1 << 25));
4142         }
4143
4144         /* Reprobe ASF enable state.  */
4145         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4146         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4147         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4148         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4149                 u32 nic_cfg;
4150
4151                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4152                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4153                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4154                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4155                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4156                 }
4157         }
4158
4159         return 0;
4160 }
4161
4162 /* tp->lock is held. */
4163 static void tg3_stop_fw(struct tg3 *tp)
4164 {
4165         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4166                 u32 val;
4167                 int i;
4168
4169                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4170                 val = tr32(GRC_RX_CPU_EVENT);
4171                 val |= (1 << 14);
4172                 tw32(GRC_RX_CPU_EVENT, val);
4173
4174                 /* Wait for RX cpu to ACK the event.  */
4175                 for (i = 0; i < 100; i++) {
4176                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4177                                 break;
4178                         udelay(1);
4179                 }
4180         }
4181 }
4182
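/* Full shutdown path used before reinitialization: pause any ASF firmware,
 * write the pre-reset signature for the given reset kind, quiesce the
 * hardware, reset the chip, and finally write the legacy and post-reset
 * signatures.  Errors from the chip reset itself are propagated to the
 * caller.
 */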
4183 /* tp->lock is held. */
4184 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4185 {
4186         int err;
4187
4188         tg3_stop_fw(tp);
4189
4190         tg3_write_sig_pre_reset(tp, kind);
4191
4192         tg3_abort_hw(tp, silent);
4193         err = tg3_chip_reset(tp);
4194
4195         tg3_write_sig_legacy(tp, kind);
4196         tg3_write_sig_post_reset(tp, kind);
4197
4198         if (err)
4199                 return err;
4200
4201         return 0;
4202 }
4203
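/* Firmware image used by tg3_load_5701_a0_firmware_fix() below: the section
 * layout constants, followed by the raw MIPS instruction words for the
 * .text and .rodata sections (.data is all zeros and therefore omitted).
 */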
4204 #define TG3_FW_RELEASE_MAJOR    0x0
4205 #define TG3_FW_RELEASE_MINOR    0x0
4206 #define TG3_FW_RELEASE_FIX      0x0
4207 #define TG3_FW_START_ADDR       0x08000000
4208 #define TG3_FW_TEXT_ADDR        0x08000000
4209 #define TG3_FW_TEXT_LEN         0x9c0
4210 #define TG3_FW_RODATA_ADDR      0x080009c0
4211 #define TG3_FW_RODATA_LEN       0x60
4212 #define TG3_FW_DATA_ADDR        0x08000a40
4213 #define TG3_FW_DATA_LEN         0x20
4214 #define TG3_FW_SBSS_ADDR        0x08000a60
4215 #define TG3_FW_SBSS_LEN         0xc
4216 #define TG3_FW_BSS_ADDR         0x08000a70
4217 #define TG3_FW_BSS_LEN          0x10
4218
4219 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4220         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4221         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4222         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4223         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4224         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4225         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4226         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4227         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4228         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4229         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4230         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4231         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4232         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4233         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4234         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4235         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4236         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4237         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4238         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4239         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4240         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4241         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4242         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4243         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4244         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4245         0, 0, 0, 0, 0, 0,
4246         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4247         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4248         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4249         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4250         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4251         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4252         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4253         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4254         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4255         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4256         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4257         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4258         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4259         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4260         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4261         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4262         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4263         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4264         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4265         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4266         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4267         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4268         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4269         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4270         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4271         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4272         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4273         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4274         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4275         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4276         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4277         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4278         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4279         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4280         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4281         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4282         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4283         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4284         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4285         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4286         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4287         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4288         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4289         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4290         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4291         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4292         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4293         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4294         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4295         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4296         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4297         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4298         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4299         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4300         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4301         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4302         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4303         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4304         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4305         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4306         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4307         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4308         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4309         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4310         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4311 };
4312
4313 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4314         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4315         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4316         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4317         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4318         0x00000000
4319 };
4320
4321 #if 0 /* All zeros, don't eat up space with it. */
4322 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4323         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4324         0x00000000, 0x00000000, 0x00000000, 0x00000000
4325 };
4326 #endif
4327
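/* On-chip scratch memory windows into which downloaded CPU firmware is
 * copied by tg3_load_firmware_cpu().
 */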
4328 #define RX_CPU_SCRATCH_BASE     0x30000
4329 #define RX_CPU_SCRATCH_SIZE     0x04000
4330 #define TX_CPU_SCRATCH_BASE     0x34000
4331 #define TX_CPU_SCRATCH_SIZE     0x04000
4332
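/* Halt the embedded RX or TX MIPS CPU by asserting CPU_MODE_HALT and polling
 * until the halt bit reads back set.  5705 and newer chips have no separate
 * TX CPU, so asking to halt TX_CPU_BASE there is a driver bug (hence the
 * BUG() below).  The RX CPU additionally gets a final forced halt and a
 * short settling delay.
 */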
4333 /* tp->lock is held. */
4334 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4335 {
4336         int i;
4337
4338         if (offset == TX_CPU_BASE &&
4339             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4340                 BUG();
4341
4342         if (offset == RX_CPU_BASE) {
4343                 for (i = 0; i < 10000; i++) {
4344                         tw32(offset + CPU_STATE, 0xffffffff);
4345                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4346                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4347                                 break;
4348                 }
4349
4350                 tw32(offset + CPU_STATE, 0xffffffff);
4351                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4352                 udelay(10);
4353         } else {
4354                 for (i = 0; i < 10000; i++) {
4355                         tw32(offset + CPU_STATE, 0xffffffff);
4356                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4357                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4358                                 break;
4359                 }
4360         }
4361
4362         if (i >= 10000) {
4363                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s "
4364                        "(%s CPU)\n",
4365                        tp->dev->name,
4366                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4367                 return -ENODEV;
4368         }
4369         return 0;
4370 }
4371
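/* Describes one firmware image to be copied into CPU scratch memory: the
 * link-time base address, length and backing data for each of the .text,
 * .rodata and .data sections.  A NULL data pointer means the section is
 * zero-filled instead.
 */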
4372 struct fw_info {
4373         unsigned int text_base;
4374         unsigned int text_len;
4375         u32 *text_data;
4376         unsigned int rodata_base;
4377         unsigned int rodata_len;
4378         u32 *rodata_data;
4379         unsigned int data_base;
4380         unsigned int data_len;
4381         u32 *data_data;
4382 };
4383
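/* Copy a firmware image into a CPU's scratch memory: halt the target CPU
 * (taking the NVRAM lock in case the bootcode is still running), zero the
 * whole scratch area, then write the .text, .rodata and .data sections at
 * their section-base offsets within the scratch window.  Indirect register
 * writes are temporarily forced through PCI config space for the duration
 * of the copy.
 */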
4384 /* tp->lock is held. */
4385 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4386                                  int cpu_scratch_size, struct fw_info *info)
4387 {
4388         int err, i;
4389         u32 orig_tg3_flags = tp->tg3_flags;
4390         void (*write_op)(struct tg3 *, u32, u32);
4391
4392         if (cpu_base == TX_CPU_BASE &&
4393             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4394                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4395                        "TX CPU firmware on %s, which has no TX CPU.\n",
4396                        tp->dev->name);
4397                 return -EINVAL;
4398         }
4399
4400         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4401                 write_op = tg3_write_mem;
4402         else
4403                 write_op = tg3_write_indirect_reg32;
4404
4405         /* Force use of PCI config space for indirect register
4406          * write calls.
4407          */
4408         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4409
4410         /* The bootcode may still be loading at this point.
4411          * Acquire the NVRAM lock before halting the CPU.
4412          */
4413         tg3_nvram_lock(tp);
4414         err = tg3_halt_cpu(tp, cpu_base);
4415         tg3_nvram_unlock(tp);
4416         if (err)
4417                 goto out;
4418
4419         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4420                 write_op(tp, cpu_scratch_base + i, 0);
4421         tw32(cpu_base + CPU_STATE, 0xffffffff);
4422         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4423         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4424                 write_op(tp, (cpu_scratch_base +
4425                               (info->text_base & 0xffff) +
4426                               (i * sizeof(u32))),
4427                          (info->text_data ?
4428                           info->text_data[i] : 0));
4429         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4430                 write_op(tp, (cpu_scratch_base +
4431                               (info->rodata_base & 0xffff) +
4432                               (i * sizeof(u32))),
4433                          (info->rodata_data ?
4434                           info->rodata_data[i] : 0));
4435         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4436                 write_op(tp, (cpu_scratch_base +
4437                               (info->data_base & 0xffff) +
4438                               (i * sizeof(u32))),
4439                          (info->data_data ?
4440                           info->data_data[i] : 0));
4441
4442         err = 0;
4443
4444 out:
4445         tp->tg3_flags = orig_tg3_flags;
4446         return err;
4447 }
4448
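/* Apply the 5701 A0 firmware fix: load the replacement image into both the
 * RX and TX CPU scratch areas, then start only the RX CPU and verify that
 * its program counter has latched the firmware entry point before declaring
 * success.
 */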
4449 /* tp->lock is held. */
4450 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4451 {
4452         struct fw_info info;
4453         int err, i;
4454
4455         info.text_base = TG3_FW_TEXT_ADDR;
4456         info.text_len = TG3_FW_TEXT_LEN;
4457         info.text_data = &tg3FwText[0];
4458         info.rodata_base = TG3_FW_RODATA_ADDR;
4459         info.rodata_len = TG3_FW_RODATA_LEN;
4460         info.rodata_data = &tg3FwRodata[0];
4461         info.data_base = TG3_FW_DATA_ADDR;
4462         info.data_len = TG3_FW_DATA_LEN;
4463         info.data_data = NULL;
4464
4465         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4466                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4467                                     &info);
4468         if (err)
4469                 return err;
4470
4471         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4472                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4473                                     &info);
4474         if (err)
4475                 return err;
4476
4477         /* Now start up only the RX CPU. */
4478         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4479         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4480
4481         for (i = 0; i < 5; i++) {
4482                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4483                         break;
4484                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4485                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4486                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4487                 udelay(1000);
4488         }
4489         if (i >= 5) {
4490                 printk(KERN_ERR PFX "tg3_load_firmware failed to set RX "
4491                        "CPU PC for %s (is %08x, should be %08x)\n",
4492                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4493                        TG3_FW_TEXT_ADDR);
4494                 return -ENODEV;
4495         }
4496         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4497         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4498
4499         return 0;
4500 }
4501
4502 #if TG3_TSO_SUPPORT != 0
4503
4504 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4505 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4506 #define TG3_TSO_FW_RELEASE_FIX          0x0
4507 #define TG3_TSO_FW_START_ADDR           0x08000000
4508 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4509 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4510 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4511 #define TG3_TSO_FW_RODATA_LEN           0x60
4512 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4513 #define TG3_TSO_FW_DATA_LEN             0x30
4514 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4515 #define TG3_TSO_FW_SBSS_LEN             0x2c
4516 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4517 #define TG3_TSO_FW_BSS_LEN              0x894
4518
4519 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4520         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4521         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4522         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4523         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4524         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4525         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4526         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4527         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4528         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4529         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4530         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4531         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4532         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4533         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4534         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4535         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4536         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4537         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4538         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4539         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4540         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4541         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4542         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4543         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4544         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4545         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4546         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4547         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4548         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4549         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4550         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4551         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4552         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4553         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4554         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4555         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4556         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4557         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4558         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4559         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4560         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4561         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4562         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4563         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4564         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4565         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4566         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4567         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4568         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4569         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4570         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4571         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4572         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4573         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4574         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4575         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4576         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4577         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4578         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4579         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4580         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4581         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4582         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4583         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4584         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4585         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4586         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4587         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4588         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4589         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4590         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4591         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4592         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4593         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4594         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4595         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4596         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4597         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4598         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4599         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4600         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4601         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4602         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4603         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4604         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4605         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4606         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4607         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4608         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4609         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4610         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4611         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4612         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4613         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4614         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4615         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4616         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4617         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4618         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4619         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4620         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4621         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4622         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4623         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4624         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4625         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4626         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4627         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4628         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4629         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4630         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4631         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4632         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4633         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4634         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4635         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4636         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4637         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4638         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4639         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4640         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4641         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4642         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4643         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4644         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4645         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4646         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4647         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4648         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4649         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4650         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4651         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4652         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4653         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4654         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4655         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4656         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4657         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4658         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4659         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4660         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4661         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4662         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4663         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4664         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4665         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4666         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4667         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4668         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4669         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4670         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4671         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4672         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4673         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4674         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4675         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4676         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4677         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4678         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4679         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4680         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4681         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4682         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4683         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4684         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4685         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4686         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4687         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4688         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4689         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4690         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4691         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4692         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4693         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4694         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4695         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4696         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4697         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4698         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4699         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4700         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4701         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4702         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4703         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4704         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4705         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4706         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4707         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4708         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4709         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4710         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4711         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4712         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4713         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4714         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4715         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4716         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4717         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4718         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4719         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4720         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4721         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4722         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4723         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4724         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4725         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4726         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4727         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4728         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4729         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4730         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4731         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4732         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4733         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4734         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4735         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4736         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4737         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4738         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4739         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4740         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4741         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4742         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4743         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4744         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4745         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4746         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4747         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4748         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4749         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4750         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4751         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4752         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4753         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4754         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4755         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4756         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4757         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4758         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4759         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4760         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4761         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4762         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4763         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4764         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4765         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4766         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4767         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4768         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4769         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4770         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4771         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4772         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4773         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4774         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4775         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4776         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4777         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4778         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4779         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4780         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4781         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4782         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4783         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4784         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4785         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4786         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4787         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4788         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4789         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4790         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4791         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4792         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4793         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4794         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4795         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4796         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4797         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4798         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4799         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4800         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4801         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4802         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4803         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4804 };
4805
4806 static u32 tg3TsoFwRodata[] = {
4807         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4808         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4809         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4810         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4811         0x00000000,
4812 };
4813
4814 static u32 tg3TsoFwData[] = {
4815         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4816         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4817         0x00000000,
4818 };
4819
4820 /* 5705 needs a special version of the TSO firmware.  */
4821 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4822 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4823 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4824 #define TG3_TSO5_FW_START_ADDR          0x00010000
4825 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4826 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4827 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4828 #define TG3_TSO5_FW_RODATA_LEN          0x50
4829 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4830 #define TG3_TSO5_FW_DATA_LEN            0x20
4831 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4832 #define TG3_TSO5_FW_SBSS_LEN            0x28
4833 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4834 #define TG3_TSO5_FW_BSS_LEN             0x88
4835
4836 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4837         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4838         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4839         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4840         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4841         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4842         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4843         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4844         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4845         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4846         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4847         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4848         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4849         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4850         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4851         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4852         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4853         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4854         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4855         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4856         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4857         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4858         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4859         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4860         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4861         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4862         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4863         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4864         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4865         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4866         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4867         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4868         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4869         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4870         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4871         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4872         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4873         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4874         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4875         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4876         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4877         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4878         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4879         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4880         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4881         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4882         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4883         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4884         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4885         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4886         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4887         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4888         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4889         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4890         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4891         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4892         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4893         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4894         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4895         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4896         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4897         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4898         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4899         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4900         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4901         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4902         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4903         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4904         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4905         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4906         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4907         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4908         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4909         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4910         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4911         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4912         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4913         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4914         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4915         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4916         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4917         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4918         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4919         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4920         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4921         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4922         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4923         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4924         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4925         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4926         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4927         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4928         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4929         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4930         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4931         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4932         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4933         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4934         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4935         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4936         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4937         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4938         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4939         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4940         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4941         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4942         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4943         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4944         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4945         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4946         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4947         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4948         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4949         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4950         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4951         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4952         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4953         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4954         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4955         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4956         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4957         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4958         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4959         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4960         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4961         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4962         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4963         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4964         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4965         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4966         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4967         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4968         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4969         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4970         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4971         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4972         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4973         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4974         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4975         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4976         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4977         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4978         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4979         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4980         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4981         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4982         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4983         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4984         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4985         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4986         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4987         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4988         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4989         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4990         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4991         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4992         0x00000000, 0x00000000, 0x00000000,
4993 };
4994
4995 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4996         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4997         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4998         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4999         0x00000000, 0x00000000, 0x00000000,
5000 };
5001
5002 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5003         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5004         0x00000000, 0x00000000, 0x00000000,
5005 };
5006
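     /* The arrays above hold the 5705-class TSO firmware image: instruction
      * text, read-only data and initialized data.  tg3_load_tso_firmware()
      * below picks either this image (run on the RX CPU on 5705) or the
      * larger TX-CPU image defined earlier, describes the segments in a
      * struct fw_info, hands them to tg3_load_firmware_cpu() together with
      * the scratch area they must fit in (including room for SBSS/BSS),
      * then points the CPU program counter at the text base and clears
      * CPU_MODE to let the CPU run, retrying a few times before giving up.
      */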
5007 /* tp->lock is held. */
5008 static int tg3_load_tso_firmware(struct tg3 *tp)
5009 {
5010         struct fw_info info;
5011         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5012         int err, i;
5013
5014         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5015                 return 0;
5016
5017         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5018                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5019                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5020                 info.text_data = &tg3Tso5FwText[0];
5021                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5022                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5023                 info.rodata_data = &tg3Tso5FwRodata[0];
5024                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5025                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5026                 info.data_data = &tg3Tso5FwData[0];
5027                 cpu_base = RX_CPU_BASE;
5028                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5029                 cpu_scratch_size = (info.text_len +
5030                                     info.rodata_len +
5031                                     info.data_len +
5032                                     TG3_TSO5_FW_SBSS_LEN +
5033                                     TG3_TSO5_FW_BSS_LEN);
5034         } else {
5035                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5036                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5037                 info.text_data = &tg3TsoFwText[0];
5038                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5039                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5040                 info.rodata_data = &tg3TsoFwRodata[0];
5041                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5042                 info.data_len = TG3_TSO_FW_DATA_LEN;
5043                 info.data_data = &tg3TsoFwData[0];
5044                 cpu_base = TX_CPU_BASE;
5045                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5046                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5047         }
5048
5049         err = tg3_load_firmware_cpu(tp, cpu_base,
5050                                     cpu_scratch_base, cpu_scratch_size,
5051                                     &info);
5052         if (err)
5053                 return err;
5054
5055         /* Now start up the CPU. */
5056         tw32(cpu_base + CPU_STATE, 0xffffffff);
5057         tw32_f(cpu_base + CPU_PC,    info.text_base);
5058
5059         for (i = 0; i < 5; i++) {
5060                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5061                         break;
5062                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5063                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5064                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5065                 udelay(1000);
5066         }
5067         if (i >= 5) {
5068                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5069                        "CPU PC for %s: is %08x, should be %08x\n",
5070                        tp->dev->name, tr32(cpu_base + CPU_PC),
5071                        info.text_base);
5072                 return -ENODEV;
5073         }
5074         tw32(cpu_base + CPU_STATE, 0xffffffff);
5075         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5076         return 0;
5077 }
5078
5079 #endif /* TG3_TSO_SUPPORT != 0 */
5080
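     /* Write the station address into the MAC.  Bytes 0-1 form the "high"
      * word and bytes 2-5 the "low" word; the same value is copied into all
      * four MAC_ADDR slots, and on 5703/5704 into the twelve extended
      * address slots as well.  The byte sum of the address also seeds the
      * transmit backoff generator.
      */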
5081 /* tp->lock is held. */
5082 static void __tg3_set_mac_addr(struct tg3 *tp)
5083 {
5084         u32 addr_high, addr_low;
5085         int i;
5086
5087         addr_high = ((tp->dev->dev_addr[0] << 8) |
5088                      tp->dev->dev_addr[1]);
5089         addr_low = ((tp->dev->dev_addr[2] << 24) |
5090                     (tp->dev->dev_addr[3] << 16) |
5091                     (tp->dev->dev_addr[4] <<  8) |
5092                     (tp->dev->dev_addr[5] <<  0));
5093         for (i = 0; i < 4; i++) {
5094                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5095                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5096         }
5097
5098         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5099             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5100                 for (i = 0; i < 12; i++) {
5101                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5102                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5103                 }
5104         }
5105
5106         addr_high = (tp->dev->dev_addr[0] +
5107                      tp->dev->dev_addr[1] +
5108                      tp->dev->dev_addr[2] +
5109                      tp->dev->dev_addr[3] +
5110                      tp->dev->dev_addr[4] +
5111                      tp->dev->dev_addr[5]) &
5112                 TX_BACKOFF_SEED_MASK;
5113         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5114 }
5115
5116 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5117 {
5118         struct tg3 *tp = netdev_priv(dev);
5119         struct sockaddr *addr = p;
5120
5121         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5122
5123         spin_lock_bh(&tp->lock);
5124         __tg3_set_mac_addr(tp);
5125         spin_unlock_bh(&tp->lock);
5126
5127         return 0;
5128 }
5129
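     /* A TG3_BDINFO block describes one ring to the chip: the 64-bit host
      * DMA address of the ring (written as separate high and low words), a
      * maxlen/flags word and, on pre-5705 parts only, the ring's location
      * in NIC SRAM.
      */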
5130 /* tp->lock is held. */
5131 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5132                            dma_addr_t mapping, u32 maxlen_flags,
5133                            u32 nic_addr)
5134 {
5135         tg3_write_mem(tp,
5136                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5137                       ((u64) mapping >> 32));
5138         tg3_write_mem(tp,
5139                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5140                       ((u64) mapping & 0xffffffff));
5141         tg3_write_mem(tp,
5142                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5143                        maxlen_flags);
5144
5145         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5146                 tg3_write_mem(tp,
5147                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5148                               nic_addr);
5149 }
5150
5151 static void __tg3_set_rx_mode(struct net_device *);
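     /* Program the host coalescing engine from an ethtool_coalesce request.
      * The rx/tx tick and max-frame thresholds are written on every chip;
      * the "ticks during interrupt" and statistics-block tick registers are
      * only programmed on pre-5705 parts, and the statistics tick is forced
      * to zero while the link is down.
      */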
5152 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5153 {
5154         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5155         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5156         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5157         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5158         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5159                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5160                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5161         }
5162         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5163         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5164         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5165                 u32 val = ec->stats_block_coalesce_usecs;
5166
5167                 if (!netif_carrier_ok(tp->dev))
5168                         val = 0;
5169
5170                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5171         }
5172 }
5173
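     /* Bring the chip from an unknown state to a fully programmed one:
      * stop any running firmware, reset the core, re-initialize the rings,
      * program the buffer manager watermarks and the standard/jumbo BDINFO
      * blocks, start the DMA engines and the MAC, load the 5701 A0 and TSO
      * firmware where needed, install the receive rules and finally run
      * tg3_setup_phy().
      */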
5174 /* tp->lock is held. */
5175 static int tg3_reset_hw(struct tg3 *tp)
5176 {
5177         u32 val, rdmac_mode;
5178         int i, err, limit;
5179
5180         tg3_disable_ints(tp);
5181
5182         tg3_stop_fw(tp);
5183
5184         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5185
5186         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5187                 tg3_abort_hw(tp, 1);
5188         }
5189
5190         err = tg3_chip_reset(tp);
5191         if (err)
5192                 return err;
5193
5194         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5195
5196         /* This works around an issue with Athlon chipsets on
5197          * B3 tigon3 silicon.  This bit has no effect on any
5198          * other revision.  But do not set this on PCI Express
5199          * chips.
5200          */
5201         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5202                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5203         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5204
5205         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5206             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5207                 val = tr32(TG3PCI_PCISTATE);
5208                 val |= PCISTATE_RETRY_SAME_DMA;
5209                 tw32(TG3PCI_PCISTATE, val);
5210         }
5211
5212         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5213                 /* Enable some hw fixes.  */
5214                 val = tr32(TG3PCI_MSI_DATA);
5215                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5216                 tw32(TG3PCI_MSI_DATA, val);
5217         }
5218
5219         /* Descriptor ring init may make accesses to the
5220          * NIC SRAM area to setup the TX descriptors, so we
5221          * can only do this after the hardware has been
5222          * successfully reset.
5223          */
5224         tg3_init_rings(tp);
5225
5226         /* This value is determined during the probe time DMA
5227          * engine test, tg3_test_dma.
5228          */
5229         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5230
5231         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5232                           GRC_MODE_4X_NIC_SEND_RINGS |
5233                           GRC_MODE_NO_TX_PHDR_CSUM |
5234                           GRC_MODE_NO_RX_PHDR_CSUM);
5235         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5236         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5237                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5238         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5239                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5240
5241         tw32(GRC_MODE,
5242              tp->grc_mode |
5243              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5244
5245         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
5246         val = tr32(GRC_MISC_CFG);
5247         val &= ~0xff;
5248         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5249         tw32(GRC_MISC_CFG, val);
5250
5251         /* Initialize MBUF/DESC pool. */
5252         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5253                 /* Do nothing.  */
5254         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5255                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5256                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5257                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5258                 else
5259                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5260                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5261                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5262         }
5263 #if TG3_TSO_SUPPORT != 0
5264         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5265                 int fw_len;
5266
5267                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5268                           TG3_TSO5_FW_RODATA_LEN +
5269                           TG3_TSO5_FW_DATA_LEN +
5270                           TG3_TSO5_FW_SBSS_LEN +
5271                           TG3_TSO5_FW_BSS_LEN);
5272                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5273                 tw32(BUFMGR_MB_POOL_ADDR,
5274                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5275                 tw32(BUFMGR_MB_POOL_SIZE,
5276                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5277         }
5278 #endif
5279
5280         if (tp->dev->mtu <= ETH_DATA_LEN) {
5281                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5282                      tp->bufmgr_config.mbuf_read_dma_low_water);
5283                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5284                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5285                 tw32(BUFMGR_MB_HIGH_WATER,
5286                      tp->bufmgr_config.mbuf_high_water);
5287         } else {
5288                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5289                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5290                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5291                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5292                 tw32(BUFMGR_MB_HIGH_WATER,
5293                      tp->bufmgr_config.mbuf_high_water_jumbo);
5294         }
5295         tw32(BUFMGR_DMA_LOW_WATER,
5296              tp->bufmgr_config.dma_low_water);
5297         tw32(BUFMGR_DMA_HIGH_WATER,
5298              tp->bufmgr_config.dma_high_water);
5299
5300         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5301         for (i = 0; i < 2000; i++) {
5302                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5303                         break;
5304                 udelay(10);
5305         }
5306         if (i >= 2000) {
5307                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5308                        tp->dev->name);
5309                 return -ENODEV;
5310         }
5311
5312         /* Setup replenish threshold. */
5313         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5314
5315         /* Initialize TG3_BDINFO's at:
5316          *  RCVDBDI_STD_BD:     standard eth size rx ring
5317          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5318          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5319          *
5320          * like so:
5321          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5322          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5323          *                              ring attribute flags
5324          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5325          *
5326          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5327          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5328          *
5329          * The size of each ring is fixed in the firmware, but the location is
5330          * configurable.
5331          */
5332         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5333              ((u64) tp->rx_std_mapping >> 32));
5334         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5335              ((u64) tp->rx_std_mapping & 0xffffffff));
5336         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5337              NIC_SRAM_RX_BUFFER_DESC);
5338
5339         /* Don't even try to program the JUMBO/MINI buffer descriptor
5340          * configs on 5705.
5341          */
5342         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5343                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5344                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5345         } else {
5346                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5347                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5348
5349                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5350                      BDINFO_FLAGS_DISABLED);
5351
5352                 /* Setup replenish threshold. */
5353                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5354
5355                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5356                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5357                              ((u64) tp->rx_jumbo_mapping >> 32));
5358                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5359                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5360                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5361                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5362                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5363                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5364                 } else {
5365                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5366                              BDINFO_FLAGS_DISABLED);
5367                 }
5368
5369         }
5370
5371         /* There is only one send ring on 5705/5750, no need to explicitly
5372          * disable the others.
5373          */
5374         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5375                 /* Clear out send RCB ring in SRAM. */
5376                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5377                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5378                                       BDINFO_FLAGS_DISABLED);
5379         }
5380
5381         tp->tx_prod = 0;
5382         tp->tx_cons = 0;
5383         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5384         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5385
5386         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5387                        tp->tx_desc_mapping,
5388                        (TG3_TX_RING_SIZE <<
5389                         BDINFO_FLAGS_MAXLEN_SHIFT),
5390                        NIC_SRAM_TX_BUFFER_DESC);
5391
5392         /* There is only one receive return ring on 5705/5750, no need
5393          * to explicitly disable the others.
5394          */
5395         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5396                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5397                      i += TG3_BDINFO_SIZE) {
5398                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5399                                       BDINFO_FLAGS_DISABLED);
5400                 }
5401         }
5402
5403         tp->rx_rcb_ptr = 0;
5404         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5405
5406         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5407                        tp->rx_rcb_mapping,
5408                        (TG3_RX_RCB_RING_SIZE(tp) <<
5409                         BDINFO_FLAGS_MAXLEN_SHIFT),
5410                        0);
5411
5412         tp->rx_std_ptr = tp->rx_pending;
5413         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5414                      tp->rx_std_ptr);
5415
5416         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5417                                                 tp->rx_jumbo_pending : 0;
5418         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5419                      tp->rx_jumbo_ptr);
5420
5421         /* Initialize MAC address and backoff seed. */
5422         __tg3_set_mac_addr(tp);
5423
5424         /* MTU + ethernet header + FCS + optional VLAN tag */
5425         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5426
5427         /* The slot time is changed by tg3_setup_phy if we
5428          * run at gigabit with half duplex.
5429          */
5430         tw32(MAC_TX_LENGTHS,
5431              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5432              (6 << TX_LENGTHS_IPG_SHIFT) |
5433              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5434
5435         /* Receive rules. */
5436         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5437         tw32(RCVLPC_CONFIG, 0x0181);
5438
5439         /* Calculate RDMAC_MODE setting early, we need it to determine
5440          * the RCVLPC_STATE_ENABLE mask.
5441          */
5442         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5443                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5444                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5445                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5446                       RDMAC_MODE_LNGREAD_ENAB);
5447         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5448                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5449
5450         /* If statement applies to 5705 and 5750 PCI devices only */
5451         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5452              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5453             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5454                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5455                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5456                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5457                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5458                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5459                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5460                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5461                 }
5462         }
5463
5464         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5465                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5466
5467 #if TG3_TSO_SUPPORT != 0
5468         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5469                 rdmac_mode |= (1 << 27);
5470 #endif
5471
5472         /* Receive/send statistics. */
5473         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5474             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5475                 val = tr32(RCVLPC_STATS_ENABLE);
5476                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5477                 tw32(RCVLPC_STATS_ENABLE, val);
5478         } else {
5479                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5480         }
5481         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5482         tw32(SNDDATAI_STATSENAB, 0xffffff);
5483         tw32(SNDDATAI_STATSCTRL,
5484              (SNDDATAI_SCTRL_ENABLE |
5485               SNDDATAI_SCTRL_FASTUPD));
5486
5487         /* Setup host coalescing engine. */
5488         tw32(HOSTCC_MODE, 0);
5489         for (i = 0; i < 2000; i++) {
5490                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5491                         break;
5492                 udelay(10);
5493         }
5494
5495         __tg3_set_coalesce(tp, &tp->coal);
5496
5497         /* set status block DMA address */
5498         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5499              ((u64) tp->status_mapping >> 32));
5500         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5501              ((u64) tp->status_mapping & 0xffffffff));
5502
5503         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5504                 /* Status/statistics block address.  See tg3_timer,
5505                  * the tg3_periodic_fetch_stats call there, and
5506                  * tg3_get_stats to see how this works for 5705/5750 chips.
5507                  */
5508                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5509                      ((u64) tp->stats_mapping >> 32));
5510                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5511                      ((u64) tp->stats_mapping & 0xffffffff));
5512                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5513                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5514         }
5515
5516         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5517
5518         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5519         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5520         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5521                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5522
5523         /* Clear statistics/status block in chip, and status block in ram. */
5524         for (i = NIC_SRAM_STATS_BLK;
5525              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5526              i += sizeof(u32)) {
5527                 tg3_write_mem(tp, i, 0);
5528                 udelay(40);
5529         }
5530         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5531
5532         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5533                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5534         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5535         udelay(40);
5536
5537         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5538          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5539          * register to preserve the GPIO settings for LOMs. The GPIOs,
5540          * whether used as inputs or outputs, are set by boot code after
5541          * reset.
5542          */
5543         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5544                 u32 gpio_mask;
5545
5546                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5547                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5548
5549                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5550                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5551                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5552
5553                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5554
5555                 /* GPIO1 must be driven high for eeprom write protect */
5556                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5557                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5558         }
5559         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5560         udelay(100);
5561
5562         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5563         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5564         tp->last_tag = 0;
5565
5566         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5567                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5568                 udelay(40);
5569         }
5570
5571         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5572                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5573                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5574                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5575                WDMAC_MODE_LNGREAD_ENAB);
5576
5577         /* If statement applies to 5705 and 5750 PCI devices only */
5578         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5579              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5580             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5581                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5582                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5583                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5584                         /* nothing */
5585                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5586                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5587                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5588                         val |= WDMAC_MODE_RX_ACCEL;
5589                 }
5590         }
5591
5592         tw32_f(WDMAC_MODE, val);
5593         udelay(40);
5594
5595         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5596                 val = tr32(TG3PCI_X_CAPS);
5597                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5598                         val &= ~PCIX_CAPS_BURST_MASK;
5599                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5600                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5601                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5602                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5603                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5604                                 val |= (tp->split_mode_max_reqs <<
5605                                         PCIX_CAPS_SPLIT_SHIFT);
5606                 }
5607                 tw32(TG3PCI_X_CAPS, val);
5608         }
5609
5610         tw32_f(RDMAC_MODE, rdmac_mode);
5611         udelay(40);
5612
5613         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5614         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5615                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5616         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5617         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5618         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5619         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5620         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5621 #if TG3_TSO_SUPPORT != 0
5622         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5623                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5624 #endif
5625         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5626         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5627
5628         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5629                 err = tg3_load_5701_a0_firmware_fix(tp);
5630                 if (err)
5631                         return err;
5632         }
5633
5634 #if TG3_TSO_SUPPORT != 0
5635         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5636                 err = tg3_load_tso_firmware(tp);
5637                 if (err)
5638                         return err;
5639         }
5640 #endif
5641
5642         tp->tx_mode = TX_MODE_ENABLE;
5643         tw32_f(MAC_TX_MODE, tp->tx_mode);
5644         udelay(100);
5645
5646         tp->rx_mode = RX_MODE_ENABLE;
5647         tw32_f(MAC_RX_MODE, tp->rx_mode);
5648         udelay(10);
5649
5650         if (tp->link_config.phy_is_low_power) {
5651                 tp->link_config.phy_is_low_power = 0;
5652                 tp->link_config.speed = tp->link_config.orig_speed;
5653                 tp->link_config.duplex = tp->link_config.orig_duplex;
5654                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5655         }
5656
5657         tp->mi_mode = MAC_MI_MODE_BASE;
5658         tw32_f(MAC_MI_MODE, tp->mi_mode);
5659         udelay(80);
5660
5661         tw32(MAC_LED_CTRL, tp->led_ctrl);
5662
5663         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5664         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5665                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5666                 udelay(10);
5667         }
5668         tw32_f(MAC_RX_MODE, tp->rx_mode);
5669         udelay(10);
5670
5671         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5672                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5673                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5674                         /* Set drive transmission level to 1.2V  */
5675                         /* only if the signal pre-emphasis bit is not set  */
5676                         val = tr32(MAC_SERDES_CFG);
5677                         val &= 0xfffff000;
5678                         val |= 0x880;
5679                         tw32(MAC_SERDES_CFG, val);
5680                 }
5681                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5682                         tw32(MAC_SERDES_CFG, 0x616000);
5683         }
5684
5685         /* Prevent chip from dropping frames when flow control
5686          * is enabled.
5687          */
5688         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5689
5690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5691             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5692                 /* Use hardware link auto-negotiation */
5693                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5694         }
5695
5696         err = tg3_setup_phy(tp, 1);
5697         if (err)
5698                 return err;
5699
5700         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5701                 u32 tmp;
5702
5703                 /* Clear CRC stats. */
5704                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5705                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5706                         tg3_readphy(tp, 0x14, &tmp);
5707                 }
5708         }
5709
5710         __tg3_set_rx_mode(tp->dev);
5711
5712         /* Initialize receive rules. */
5713         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5714         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5715         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5716         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5717
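             /* The unused higher-numbered rules are cleared below.  5705-class
              * parts other than the 5780 implement 8 receive rules while older
              * parts implement 16; when ASF is enabled the last four rules are
              * left untouched, presumably because the ASF firmware owns them.
              */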
5718         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5719             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
5720                 limit = 8;
5721         else
5722                 limit = 16;
5723         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5724                 limit -= 4;
5725         switch (limit) {
5726         case 16:
5727                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5728         case 15:
5729                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5730         case 14:
5731                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5732         case 13:
5733                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5734         case 12:
5735                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5736         case 11:
5737                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5738         case 10:
5739                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5740         case 9:
5741                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5742         case 8:
5743                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5744         case 7:
5745                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5746         case 6:
5747                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5748         case 5:
5749                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5750         case 4:
5751                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5752         case 3:
5753                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5754         case 2:
5755         case 1:
5756
5757         default:
5758                 break;
5759         }
5760
5761         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5762
5763         return 0;
5764 }
5765
5766 /* Called at device open time to get the chip ready for
5767  * packet processing.  Invoked with tp->lock held.
5768  */
5769 static int tg3_init_hw(struct tg3 *tp)
5770 {
5771         int err;
5772
5773         /* Force the chip into D0. */
5774         err = tg3_set_power_state(tp, 0);
5775         if (err)
5776                 goto out;
5777
5778         tg3_switch_clocks(tp);
5779
5780         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5781
5782         err = tg3_reset_hw(tp);
5783
5784 out:
5785         return err;
5786 }
5787
5788 #define TG3_STAT_ADD32(PSTAT, REG) \
5789 do {    u32 __val = tr32(REG); \
5790         (PSTAT)->low += __val; \
5791         if ((PSTAT)->low < __val) \
5792                 (PSTAT)->high += 1; \
5793 } while (0)
5794
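     /* TG3_STAT_ADD32() folds a 32-bit hardware counter sample into a
      * 64-bit software counter kept as separate low/high words.  If adding
      * the sample leaves the low word smaller than the sample itself, the
      * addition wrapped and the high word is carried: for example, low =
      * 0xffffff00 plus a sample of 0x200 gives low = 0x100 < 0x200, so
      * high gains one.  tg3_periodic_fetch_stats() uses it to accumulate
      * the MAC statistics registers into tp->hw_stats on 5705-plus chips,
      * where the timer rather than the NIC's statistics-block DMA keeps
      * the counters current (see tg3_timer and tg3_reset_hw).
      */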
5795 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5796 {
5797         struct tg3_hw_stats *sp = tp->hw_stats;
5798
5799         if (!netif_carrier_ok(tp->dev))
5800                 return;
5801
5802         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5803         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5804         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5805         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5806         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5807         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5808         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5809         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5810         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5811         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5812         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5813         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5814         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5815
5816         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5817         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5818         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5819         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5820         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5821         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5822         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5823         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5824         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5825         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5826         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5827         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5828         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5829         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5830 }
5831
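     /* Periodic driver timer, re-armed every tp->timer_offset jiffies.
      * With non-tagged status it first pokes the interrupt logic to work
      * around races in the mailbox/status-block protocol and schedules a
      * full reset if the write DMA engine has stopped.  Once per second it
      * fetches statistics (5705-plus) and polls for link changes; every
      * 120 seconds it sends the ASF heartbeat through the firmware mailbox
      * when ASF is enabled.
      */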
5832 static void tg3_timer(unsigned long __opaque)
5833 {
5834         struct tg3 *tp = (struct tg3 *) __opaque;
5835
5836         spin_lock(&tp->lock);
5837
5838         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
5839                 /* All of this is needed because, when using non-tagged
5840                  * IRQ status, the mailbox/status_block protocol the chip
5841                  * uses with the CPU is race prone.
5842                  */
5843                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
5844                         tw32(GRC_LOCAL_CTRL,
5845                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5846                 } else {
5847                         tw32(HOSTCC_MODE, tp->coalesce_mode |
5848                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5849                 }
5850
5851                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5852                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5853                         spin_unlock(&tp->lock);
5854                         schedule_work(&tp->reset_task);
5855                         return;
5856                 }
5857         }
5858
5859         /* This part only runs once per second. */
5860         if (!--tp->timer_counter) {
5861                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5862                         tg3_periodic_fetch_stats(tp);
5863
5864                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5865                         u32 mac_stat;
5866                         int phy_event;
5867
5868                         mac_stat = tr32(MAC_STATUS);
5869
5870                         phy_event = 0;
5871                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5872                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5873                                         phy_event = 1;
5874                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5875                                 phy_event = 1;
5876
5877                         if (phy_event)
5878                                 tg3_setup_phy(tp, 0);
5879                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5880                         u32 mac_stat = tr32(MAC_STATUS);
5881                         int need_setup = 0;
5882
5883                         if (netif_carrier_ok(tp->dev) &&
5884                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5885                                 need_setup = 1;
5886                         }
5887                         if (! netif_carrier_ok(tp->dev) &&
5888                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5889                                          MAC_STATUS_SIGNAL_DET))) {
5890                                 need_setup = 1;
5891                         }
5892                         if (need_setup) {
5893                                 tw32_f(MAC_MODE,
5894                                      (tp->mac_mode &
5895                                       ~MAC_MODE_PORT_MODE_MASK));
5896                                 udelay(40);
5897                                 tw32_f(MAC_MODE, tp->mac_mode);
5898                                 udelay(40);
5899                                 tg3_setup_phy(tp, 0);
5900                         }
5901                 }
5902
5903                 tp->timer_counter = tp->timer_multiplier;
5904         }
5905
5906         /* Heartbeat is only sent once every 120 seconds.  */
5907         if (!--tp->asf_counter) {
5908                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5909                         u32 val;
5910
5911                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5912                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5913                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5914                         val = tr32(GRC_RX_CPU_EVENT);
5915                         val |= (1 << 14);
5916                         tw32(GRC_RX_CPU_EVENT, val);
5917                 }
5918                 tp->asf_counter = tp->asf_multiplier;
5919         }
5920
5921         spin_unlock(&tp->lock);
5922
5923         tp->timer.expires = jiffies + tp->timer_offset;
5924         add_timer(&tp->timer);
5925 }
5926
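     /* Check that the chip can actually deliver an interrupt with the
      * current IRQ setup: temporarily install tg3_test_isr, force an
      * immediate host-coalescing interrupt and poll the interrupt mailbox
      * a few times, 10 ms apart.  The normal handler (MSI, tagged-status
      * or plain INTx) is re-installed before returning.
      */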
5927 static int tg3_test_interrupt(struct tg3 *tp)
5928 {
5929         struct net_device *dev = tp->dev;
5930         int err, i;
5931         u32 int_mbox = 0;
5932
5933         if (!netif_running(dev))
5934                 return -ENODEV;
5935
5936         tg3_disable_ints(tp);
5937
5938         free_irq(tp->pdev->irq, dev);
5939
5940         err = request_irq(tp->pdev->irq, tg3_test_isr,
5941                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5942         if (err)
5943                 return err;
5944
5945         tg3_enable_ints(tp);
5946
5947         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
5948                HOSTCC_MODE_NOW);
5949
5950         for (i = 0; i < 5; i++) {
5951                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5952                 if (int_mbox != 0)
5953                         break;
5954                 msleep(10);
5955         }
5956
5957         tg3_disable_ints(tp);
5958
5959         free_irq(tp->pdev->irq, dev);
5960         
5961         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5962                 err = request_irq(tp->pdev->irq, tg3_msi,
5963                                   SA_SAMPLE_RANDOM, dev->name, dev);
5964         else {
5965                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
5966                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
5967                         fn = tg3_interrupt_tagged;
5968                 err = request_irq(tp->pdev->irq, fn,
5969                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5970         }
5971
5972         if (err)
5973                 return err;
5974
5975         if (int_mbox != 0)
5976                 return 0;
5977
5978         return -EIO;
5979 }
5980
5981 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
5982  * mode is successfully restored.
5983  */
5984 static int tg3_test_msi(struct tg3 *tp)
5985 {
5986         struct net_device *dev = tp->dev;
5987         int err;
5988         u16 pci_cmd;
5989
5990         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
5991                 return 0;
5992
5993         /* Turn off SERR reporting in case MSI terminates with Master
5994          * Abort.
5995          */
5996         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
5997         pci_write_config_word(tp->pdev, PCI_COMMAND,
5998                               pci_cmd & ~PCI_COMMAND_SERR);
5999
6000         err = tg3_test_interrupt(tp);
6001
6002         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6003
6004         if (!err)
6005                 return 0;
6006
6007         /* other failures */
6008         if (err != -EIO)
6009                 return err;
6010
6011         /* MSI test failed, go back to INTx mode */
6012         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6013                "switching to INTx mode. Please report this failure to "
6014                "the PCI maintainer and include system chipset information.\n",
6015                        tp->dev->name);
6016
6017         free_irq(tp->pdev->irq, dev);
6018         pci_disable_msi(tp->pdev);
6019
6020         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6021
6022         {
6023                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6024                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6025                         fn = tg3_interrupt_tagged;
6026
6027                 err = request_irq(tp->pdev->irq, fn,
6028                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6029         }
6030         if (err)
6031                 return err;
6032
6033         /* Need to reset the chip because the MSI cycle may have terminated
6034          * with Master Abort.
6035          */
6036         tg3_full_lock(tp, 1);
6037
6038         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6039         err = tg3_init_hw(tp);
6040
6041         tg3_full_unlock(tp);
6042
6043         if (err)
6044                 free_irq(tp->pdev->irq, dev);
6045
6046         return err;
6047 }
6048
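     /* Device open path: allocate the DMA-consistent descriptor and status
      * memory, optionally switch to MSI (5750-plus parts other than the AX
      * and BX steppings, and only with tagged status), request the IRQ,
      * initialize the hardware, set up the periodic timer, verify MSI
      * delivery with tg3_test_msi(), and finally enable interrupts and
      * wake the transmit queue.
      */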
6049 static int tg3_open(struct net_device *dev)
6050 {
6051         struct tg3 *tp = netdev_priv(dev);
6052         int err;
6053
6054         tg3_full_lock(tp, 0);
6055
6056         tg3_disable_ints(tp);
6057         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6058
6059         tg3_full_unlock(tp);
6060
6061         /* The placement of this call is tied
6062          * to the setup and use of Host TX descriptors.
6063          */
6064         err = tg3_alloc_consistent(tp);
6065         if (err)
6066                 return err;
6067
6068         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6069             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6070             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6071                 /* All MSI supporting chips should support tagged
6072                  * status.  Assert that this is the case.
6073                  */
6074                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6075                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6076                                "Not using MSI.\n", tp->dev->name);
6077                 } else if (pci_enable_msi(tp->pdev) == 0) {
6078                         u32 msi_mode;
6079
6080                         msi_mode = tr32(MSGINT_MODE);
6081                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6082                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6083                 }
6084         }
6085         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6086                 err = request_irq(tp->pdev->irq, tg3_msi,
6087                                   SA_SAMPLE_RANDOM, dev->name, dev);
6088         else {
6089                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6090                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6091                         fn = tg3_interrupt_tagged;
6092
6093                 err = request_irq(tp->pdev->irq, fn,
6094                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6095         }
6096
6097         if (err) {
6098                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6099                         pci_disable_msi(tp->pdev);
6100                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6101                 }
6102                 tg3_free_consistent(tp);
6103                 return err;
6104         }
6105
6106         tg3_full_lock(tp, 0);
6107
6108         err = tg3_init_hw(tp);
6109         if (err) {
6110                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6111                 tg3_free_rings(tp);
6112         } else {
6113                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6114                         tp->timer_offset = HZ;
6115                 else
6116                         tp->timer_offset = HZ / 10;
6117
6118                 BUG_ON(tp->timer_offset > HZ);
6119                 tp->timer_counter = tp->timer_multiplier =
6120                         (HZ / tp->timer_offset);
6121                 tp->asf_counter = tp->asf_multiplier =
6122                         ((HZ / tp->timer_offset) * 120);
6123
6124                 init_timer(&tp->timer);
6125                 tp->timer.expires = jiffies + tp->timer_offset;
6126                 tp->timer.data = (unsigned long) tp;
6127                 tp->timer.function = tg3_timer;
6128         }
6129
6130         tg3_full_unlock(tp);
6131
6132         if (err) {
6133                 free_irq(tp->pdev->irq, dev);
6134                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6135                         pci_disable_msi(tp->pdev);
6136                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6137                 }
6138                 tg3_free_consistent(tp);
6139                 return err;
6140         }
6141
6142         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6143                 err = tg3_test_msi(tp);
6144
6145                 if (err) {
6146                         tg3_full_lock(tp, 0);
6147
6148                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6149                                 pci_disable_msi(tp->pdev);
6150                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6151                         }
6152                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6153                         tg3_free_rings(tp);
6154                         tg3_free_consistent(tp);
6155
6156                         tg3_full_unlock(tp);
6157
6158                         return err;
6159                 }
6160         }
6161
6162         tg3_full_lock(tp, 0);
6163
6164         add_timer(&tp->timer);
6165         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6166         tg3_enable_ints(tp);
6167
6168         tg3_full_unlock(tp);
6169
6170         netif_start_queue(dev);
6171
6172         return 0;
6173 }
6174
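     /* Debug-only register and state dump, compiled out via #if 0. */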
6175 #if 0
6176 /*static*/ void tg3_dump_state(struct tg3 *tp)
6177 {
6178         u32 val32, val32_2, val32_3, val32_4, val32_5;
6179         u16 val16;
6180         int i;
6181
6182         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6183         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6184         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6185                val16, val32);
6186
6187         /* MAC block */
6188         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6189                tr32(MAC_MODE), tr32(MAC_STATUS));
6190         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6191                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6192         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6193                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6194         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6195                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6196
6197         /* Send data initiator control block */
6198         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6199                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6200         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6201                tr32(SNDDATAI_STATSCTRL));
6202
6203         /* Send data completion control block */
6204         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6205
6206         /* Send BD ring selector block */
6207         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6208                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6209
6210         /* Send BD initiator control block */
6211         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6212                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6213
6214         /* Send BD completion control block */
6215         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6216
6217         /* Receive list placement control block */
6218         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6219                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6220         printk("       RCVLPC_STATSCTRL[%08x]\n",
6221                tr32(RCVLPC_STATSCTRL));
6222
6223         /* Receive data and receive BD initiator control block */
6224         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6225                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6226
6227         /* Receive data completion control block */
6228         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6229                tr32(RCVDCC_MODE));
6230
6231         /* Receive BD initiator control block */
6232         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6233                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6234
6235         /* Receive BD completion control block */
6236         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6237                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6238
6239         /* Receive list selector control block */
6240         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6241                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6242
6243         /* Mbuf cluster free block */
6244         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6245                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6246
6247         /* Host coalescing control block */
6248         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6249                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6250         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6251                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6252                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6253         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6254                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6255                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6256         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6257                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6258         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6259                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6260
6261         /* Memory arbiter control block */
6262         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6263                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6264
6265         /* Buffer manager control block */
6266         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6267                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6268         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6269                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6270         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6271                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6272                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6273                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6274
6275         /* Read DMA control block */
6276         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6277                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6278
6279         /* Write DMA control block */
6280         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6281                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6282
6283         /* DMA completion block */
6284         printk("DEBUG: DMAC_MODE[%08x]\n",
6285                tr32(DMAC_MODE));
6286
6287         /* GRC block */
6288         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6289                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6290         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6291                tr32(GRC_LOCAL_CTRL));
6292
6293         /* TG3_BDINFOs */
6294         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6295                tr32(RCVDBDI_JUMBO_BD + 0x0),
6296                tr32(RCVDBDI_JUMBO_BD + 0x4),
6297                tr32(RCVDBDI_JUMBO_BD + 0x8),
6298                tr32(RCVDBDI_JUMBO_BD + 0xc));
6299         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6300                tr32(RCVDBDI_STD_BD + 0x0),
6301                tr32(RCVDBDI_STD_BD + 0x4),
6302                tr32(RCVDBDI_STD_BD + 0x8),
6303                tr32(RCVDBDI_STD_BD + 0xc));
6304         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6305                tr32(RCVDBDI_MINI_BD + 0x0),
6306                tr32(RCVDBDI_MINI_BD + 0x4),
6307                tr32(RCVDBDI_MINI_BD + 0x8),
6308                tr32(RCVDBDI_MINI_BD + 0xc));
6309
6310         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6311         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6312         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6313         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6314         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6315                val32, val32_2, val32_3, val32_4);
6316
6317         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6318         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6319         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6320         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6321         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6322                val32, val32_2, val32_3, val32_4);
6323
6324         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6325         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6326         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6327         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6328         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6329         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6330                val32, val32_2, val32_3, val32_4, val32_5);
6331
6332         /* SW status block */
6333         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6334                tp->hw_status->status,
6335                tp->hw_status->status_tag,
6336                tp->hw_status->rx_jumbo_consumer,
6337                tp->hw_status->rx_consumer,
6338                tp->hw_status->rx_mini_consumer,
6339                tp->hw_status->idx[0].rx_producer,
6340                tp->hw_status->idx[0].tx_consumer);
6341
6342         /* SW statistics block */
6343         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6344                ((u32 *)tp->hw_stats)[0],
6345                ((u32 *)tp->hw_stats)[1],
6346                ((u32 *)tp->hw_stats)[2],
6347                ((u32 *)tp->hw_stats)[3]);
6348
6349         /* Mailboxes */
6350         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6351                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6352                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6353                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6354                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6355
6356         /* NIC side send descriptors. */
6357         for (i = 0; i < 6; i++) {
6358                 unsigned long txd;
6359
6360                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6361                         + (i * sizeof(struct tg3_tx_buffer_desc));
6362                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6363                        i,
6364                        readl(txd + 0x0), readl(txd + 0x4),
6365                        readl(txd + 0x8), readl(txd + 0xc));
6366         }
6367
6368         /* NIC side RX descriptors. */
6369         for (i = 0; i < 6; i++) {
6370                 unsigned long rxd;
6371
6372                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6373                         + (i * sizeof(struct tg3_rx_buffer_desc));
6374                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6375                        i,
6376                        readl(rxd + 0x0), readl(rxd + 0x4),
6377                        readl(rxd + 0x8), readl(rxd + 0xc));
6378                 rxd += (4 * sizeof(u32));
6379                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6380                        i,
6381                        readl(rxd + 0x0), readl(rxd + 0x4),
6382                        readl(rxd + 0x8), readl(rxd + 0xc));
6383         }
6384
6385         for (i = 0; i < 6; i++) {
6386                 unsigned long rxd;
6387
6388                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6389                         + (i * sizeof(struct tg3_rx_buffer_desc));
6390                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6391                        i,
6392                        readl(rxd + 0x0), readl(rxd + 0x4),
6393                        readl(rxd + 0x8), readl(rxd + 0xc));
6394                 rxd += (4 * sizeof(u32));
6395                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6396                        i,
6397                        readl(rxd + 0x0), readl(rxd + 0x4),
6398                        readl(rxd + 0x8), readl(rxd + 0xc));
6399         }
6400 }
6401 #endif
6402
6403 static struct net_device_stats *tg3_get_stats(struct net_device *);
6404 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6405
6406 static int tg3_close(struct net_device *dev)
6407 {
6408         struct tg3 *tp = netdev_priv(dev);
6409
6410         netif_stop_queue(dev);
6411
6412         del_timer_sync(&tp->timer);
6413
6414         tg3_full_lock(tp, 1);
6415 #if 0
6416         tg3_dump_state(tp);
6417 #endif
6418
6419         tg3_disable_ints(tp);
6420
6421         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6422         tg3_free_rings(tp);
6423         tp->tg3_flags &=
6424                 ~(TG3_FLAG_INIT_COMPLETE |
6425                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6426         netif_carrier_off(tp->dev);
6427
6428         tg3_full_unlock(tp);
6429
6430         free_irq(tp->pdev->irq, dev);
6431         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6432                 pci_disable_msi(tp->pdev);
6433                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6434         }
6435
6436         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6437                sizeof(tp->net_stats_prev));
6438         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6439                sizeof(tp->estats_prev));
6440
6441         tg3_free_consistent(tp);
6442
6443         return 0;
6444 }
6445
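/* The chip exports each MAC statistic as a 64-bit counter split into
 * high/low 32-bit words (tg3_stat64_t).  On 64-bit hosts the two words
 * are recombined; on 32-bit hosts only the low word is returned so the
 * value still fits in the unsigned long used by struct net_device_stats.
 */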
6446 static inline unsigned long get_stat64(tg3_stat64_t *val)
6447 {
6448         unsigned long ret;
6449
6450 #if (BITS_PER_LONG == 32)
6451         ret = val->low;
6452 #else
6453         ret = ((u64)val->high << 32) | ((u64)val->low);
6454 #endif
6455         return ret;
6456 }
6457
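/* CRC error accounting: on 5700/5701 copper parts the count is pulled
 * from the PHY (register 0x14, exposed by setting bit 15 of register
 * 0x1e) and accumulated in tp->phy_crc_errors; all other configurations
 * simply report the MAC's rx_fcs_errors hardware statistic.
 */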
6458 static unsigned long calc_crc_errors(struct tg3 *tp)
6459 {
6460         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6461
6462         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6463             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6464              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6465                 u32 val;
6466
6467                 spin_lock_bh(&tp->lock);
6468                 if (!tg3_readphy(tp, 0x1e, &val)) {
6469                         tg3_writephy(tp, 0x1e, val | 0x8000);
6470                         tg3_readphy(tp, 0x14, &val);
6471                 } else
6472                         val = 0;
6473                 spin_unlock_bh(&tp->lock);
6474
6475                 tp->phy_crc_errors += val;
6476
6477                 return tp->phy_crc_errors;
6478         }
6479
6480         return get_stat64(&hw_stats->rx_fcs_errors);
6481 }
6482
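/* Each ethtool counter is the snapshot taken at the last close
 * (tp->estats_prev) plus the current hardware count, so values stay
 * monotonic across down/up cycles.  For example, ESTAT_ADD(rx_octets)
 * expands to:
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 */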
6483 #define ESTAT_ADD(member) \
6484         estats->member =        old_estats->member + \
6485                                 get_stat64(&hw_stats->member)
6486
6487 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6488 {
6489         struct tg3_ethtool_stats *estats = &tp->estats;
6490         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6491         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6492
6493         if (!hw_stats)
6494                 return old_estats;
6495
6496         ESTAT_ADD(rx_octets);
6497         ESTAT_ADD(rx_fragments);
6498         ESTAT_ADD(rx_ucast_packets);
6499         ESTAT_ADD(rx_mcast_packets);
6500         ESTAT_ADD(rx_bcast_packets);
6501         ESTAT_ADD(rx_fcs_errors);
6502         ESTAT_ADD(rx_align_errors);
6503         ESTAT_ADD(rx_xon_pause_rcvd);
6504         ESTAT_ADD(rx_xoff_pause_rcvd);
6505         ESTAT_ADD(rx_mac_ctrl_rcvd);
6506         ESTAT_ADD(rx_xoff_entered);
6507         ESTAT_ADD(rx_frame_too_long_errors);
6508         ESTAT_ADD(rx_jabbers);
6509         ESTAT_ADD(rx_undersize_packets);
6510         ESTAT_ADD(rx_in_length_errors);
6511         ESTAT_ADD(rx_out_length_errors);
6512         ESTAT_ADD(rx_64_or_less_octet_packets);
6513         ESTAT_ADD(rx_65_to_127_octet_packets);
6514         ESTAT_ADD(rx_128_to_255_octet_packets);
6515         ESTAT_ADD(rx_256_to_511_octet_packets);
6516         ESTAT_ADD(rx_512_to_1023_octet_packets);
6517         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6518         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6519         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6520         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6521         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6522
6523         ESTAT_ADD(tx_octets);
6524         ESTAT_ADD(tx_collisions);
6525         ESTAT_ADD(tx_xon_sent);
6526         ESTAT_ADD(tx_xoff_sent);
6527         ESTAT_ADD(tx_flow_control);
6528         ESTAT_ADD(tx_mac_errors);
6529         ESTAT_ADD(tx_single_collisions);
6530         ESTAT_ADD(tx_mult_collisions);
6531         ESTAT_ADD(tx_deferred);
6532         ESTAT_ADD(tx_excessive_collisions);
6533         ESTAT_ADD(tx_late_collisions);
6534         ESTAT_ADD(tx_collide_2times);
6535         ESTAT_ADD(tx_collide_3times);
6536         ESTAT_ADD(tx_collide_4times);
6537         ESTAT_ADD(tx_collide_5times);
6538         ESTAT_ADD(tx_collide_6times);
6539         ESTAT_ADD(tx_collide_7times);
6540         ESTAT_ADD(tx_collide_8times);
6541         ESTAT_ADD(tx_collide_9times);
6542         ESTAT_ADD(tx_collide_10times);
6543         ESTAT_ADD(tx_collide_11times);
6544         ESTAT_ADD(tx_collide_12times);
6545         ESTAT_ADD(tx_collide_13times);
6546         ESTAT_ADD(tx_collide_14times);
6547         ESTAT_ADD(tx_collide_15times);
6548         ESTAT_ADD(tx_ucast_packets);
6549         ESTAT_ADD(tx_mcast_packets);
6550         ESTAT_ADD(tx_bcast_packets);
6551         ESTAT_ADD(tx_carrier_sense_errors);
6552         ESTAT_ADD(tx_discards);
6553         ESTAT_ADD(tx_errors);
6554
6555         ESTAT_ADD(dma_writeq_full);
6556         ESTAT_ADD(dma_write_prioq_full);
6557         ESTAT_ADD(rxbds_empty);
6558         ESTAT_ADD(rx_discards);
6559         ESTAT_ADD(rx_errors);
6560         ESTAT_ADD(rx_threshold_hit);
6561
6562         ESTAT_ADD(dma_readq_full);
6563         ESTAT_ADD(dma_read_prioq_full);
6564         ESTAT_ADD(tx_comp_queue_full);
6565
6566         ESTAT_ADD(ring_set_send_prod_index);
6567         ESTAT_ADD(ring_status_update);
6568         ESTAT_ADD(nic_irqs);
6569         ESTAT_ADD(nic_avoided_irqs);
6570         ESTAT_ADD(nic_tx_threshold_hit);
6571
6572         return estats;
6573 }
6574
6575 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6576 {
6577         struct tg3 *tp = netdev_priv(dev);
6578         struct net_device_stats *stats = &tp->net_stats;
6579         struct net_device_stats *old_stats = &tp->net_stats_prev;
6580         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6581
6582         if (!hw_stats)
6583                 return old_stats;
6584
6585         stats->rx_packets = old_stats->rx_packets +
6586                 get_stat64(&hw_stats->rx_ucast_packets) +
6587                 get_stat64(&hw_stats->rx_mcast_packets) +
6588                 get_stat64(&hw_stats->rx_bcast_packets);
6589                 
6590         stats->tx_packets = old_stats->tx_packets +
6591                 get_stat64(&hw_stats->tx_ucast_packets) +
6592                 get_stat64(&hw_stats->tx_mcast_packets) +
6593                 get_stat64(&hw_stats->tx_bcast_packets);
6594
6595         stats->rx_bytes = old_stats->rx_bytes +
6596                 get_stat64(&hw_stats->rx_octets);
6597         stats->tx_bytes = old_stats->tx_bytes +
6598                 get_stat64(&hw_stats->tx_octets);
6599
6600         stats->rx_errors = old_stats->rx_errors +
6601                 get_stat64(&hw_stats->rx_errors) +
6602                 get_stat64(&hw_stats->rx_discards);
6603         stats->tx_errors = old_stats->tx_errors +
6604                 get_stat64(&hw_stats->tx_errors) +
6605                 get_stat64(&hw_stats->tx_mac_errors) +
6606                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6607                 get_stat64(&hw_stats->tx_discards);
6608
6609         stats->multicast = old_stats->multicast +
6610                 get_stat64(&hw_stats->rx_mcast_packets);
6611         stats->collisions = old_stats->collisions +
6612                 get_stat64(&hw_stats->tx_collisions);
6613
6614         stats->rx_length_errors = old_stats->rx_length_errors +
6615                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6616                 get_stat64(&hw_stats->rx_undersize_packets);
6617
6618         stats->rx_over_errors = old_stats->rx_over_errors +
6619                 get_stat64(&hw_stats->rxbds_empty);
6620         stats->rx_frame_errors = old_stats->rx_frame_errors +
6621                 get_stat64(&hw_stats->rx_align_errors);
6622         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6623                 get_stat64(&hw_stats->tx_discards);
6624         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6625                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6626
6627         stats->rx_crc_errors = old_stats->rx_crc_errors +
6628                 calc_crc_errors(tp);
6629
6630         return stats;
6631 }
6632
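/* Bit-serial CRC-32 over the buffer, LSB first with the reflected
 * polynomial 0xedb88320 -- the same CRC used for the Ethernet FCS.
 * The multicast filter code below derives a 7-bit hash index from
 * the result.
 */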
6633 static inline u32 calc_crc(unsigned char *buf, int len)
6634 {
6635         u32 reg;
6636         u32 tmp;
6637         int j, k;
6638
6639         reg = 0xffffffff;
6640
6641         for (j = 0; j < len; j++) {
6642                 reg ^= buf[j];
6643
6644                 for (k = 0; k < 8; k++) {
6645                         tmp = reg & 0x01;
6646
6647                         reg >>= 1;
6648
6649                         if (tmp) {
6650                                 reg ^= 0xedb88320;
6651                         }
6652                 }
6653         }
6654
6655         return ~reg;
6656 }
6657
6658 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6659 {
6660         /* accept or reject all multicast frames */
6661         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6662         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6663         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6664         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6665 }
6666
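/* Program the receive filters from dev->flags and the multicast list:
 * IFF_PROMISC sets RX_MODE_PROMISC, IFF_ALLMULTI (or an empty list)
 * opens or closes the filter outright via tg3_set_multi(), and
 * otherwise each address is hashed into the 128-bit filter spread
 * across the four MAC_HASH registers.  RX_MODE_KEEP_VLAN_TAG is set
 * only when no VLAN group is registered and ASF is not enabled.
 */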
6667 static void __tg3_set_rx_mode(struct net_device *dev)
6668 {
6669         struct tg3 *tp = netdev_priv(dev);
6670         u32 rx_mode;
6671
6672         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6673                                   RX_MODE_KEEP_VLAN_TAG);
6674
6675         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6676          * flag clear.
6677          */
6678 #if TG3_VLAN_TAG_USED
6679         if (!tp->vlgrp &&
6680             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6681                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6682 #else
6683         /* By definition, VLAN is always disabled in this
6684          * case.
6685          */
6686         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6687                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6688 #endif
6689
6690         if (dev->flags & IFF_PROMISC) {
6691                 /* Promiscuous mode. */
6692                 rx_mode |= RX_MODE_PROMISC;
6693         } else if (dev->flags & IFF_ALLMULTI) {
6694                 /* Accept all multicast. */
6695                 tg3_set_multi(tp, 1);
6696         } else if (dev->mc_count < 1) {
6697                 /* Reject all multicast. */
6698                 tg3_set_multi(tp, 0);
6699         } else {
6700                 /* Accept one or more multicast addresses. */
6701                 struct dev_mc_list *mclist;
6702                 unsigned int i;
6703                 u32 mc_filter[4] = { 0, };
6704                 u32 regidx;
6705                 u32 bit;
6706                 u32 crc;
6707
6708                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6709                      i++, mclist = mclist->next) {
6710
6711                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
6712                         bit = ~crc & 0x7f;
6713                         regidx = (bit & 0x60) >> 5;
6714                         bit &= 0x1f;
6715                         mc_filter[regidx] |= (1 << bit);
6716                 }
6717
6718                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6719                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6720                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6721                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6722         }
6723
6724         if (rx_mode != tp->rx_mode) {
6725                 tp->rx_mode = rx_mode;
6726                 tw32_f(MAC_RX_MODE, rx_mode);
6727                 udelay(10);
6728         }
6729 }
6730
6731 static void tg3_set_rx_mode(struct net_device *dev)
6732 {
6733         struct tg3 *tp = netdev_priv(dev);
6734
6735         tg3_full_lock(tp, 0);
6736         __tg3_set_rx_mode(dev);
6737         tg3_full_unlock(tp);
6738 }
6739
6740 #define TG3_REGDUMP_LEN         (32 * 1024)
6741
6742 static int tg3_get_regs_len(struct net_device *dev)
6743 {
6744         return TG3_REGDUMP_LEN;
6745 }
6746
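/* ethtool register dump.  The 32k buffer is zeroed and each register
 * is then copied to its own hardware offset within the buffer (the
 * GET_REG32_* helpers reset 'p' to orig_p + offset), so the gaps
 * between the dumped blocks simply read back as zero.
 */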
6747 static void tg3_get_regs(struct net_device *dev,
6748                 struct ethtool_regs *regs, void *_p)
6749 {
6750         u32 *p = _p;
6751         struct tg3 *tp = netdev_priv(dev);
6752         u8 *orig_p = _p;
6753         int i;
6754
6755         regs->version = 0;
6756
6757         memset(p, 0, TG3_REGDUMP_LEN);
6758
6759         tg3_full_lock(tp, 0);
6760
6761 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6762 #define GET_REG32_LOOP(base,len)                \
6763 do {    p = (u32 *)(orig_p + (base));           \
6764         for (i = 0; i < len; i += 4)            \
6765                 __GET_REG32((base) + i);        \
6766 } while (0)
6767 #define GET_REG32_1(reg)                        \
6768 do {    p = (u32 *)(orig_p + (reg));            \
6769         __GET_REG32((reg));                     \
6770 } while (0)
6771
6772         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6773         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6774         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6775         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6776         GET_REG32_1(SNDDATAC_MODE);
6777         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6778         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6779         GET_REG32_1(SNDBDC_MODE);
6780         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6781         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6782         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6783         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6784         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6785         GET_REG32_1(RCVDCC_MODE);
6786         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6787         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6788         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6789         GET_REG32_1(MBFREE_MODE);
6790         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6791         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6792         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6793         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6794         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6795         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6796         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6797         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6798         GET_REG32_LOOP(FTQ_RESET, 0x120);
6799         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6800         GET_REG32_1(DMAC_MODE);
6801         GET_REG32_LOOP(GRC_MODE, 0x4c);
6802         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6803                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6804
6805 #undef __GET_REG32
6806 #undef GET_REG32_LOOP
6807 #undef GET_REG32_1
6808
6809         tg3_full_unlock(tp);
6810 }
6811
6812 static int tg3_get_eeprom_len(struct net_device *dev)
6813 {
6814         struct tg3 *tp = netdev_priv(dev);
6815
6816         return tp->nvram_size;
6817 }
6818
6819 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6820
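/* NVRAM is read a 32-bit word at a time, so an arbitrary ethtool
 * request is split into three pieces: a leading partial word when the
 * offset is unaligned, the whole words in the middle, and a trailing
 * partial word when the length is not a multiple of four.  eeprom->len
 * tracks how many bytes were actually copied out.
 */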
6821 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6822 {
6823         struct tg3 *tp = netdev_priv(dev);
6824         int ret;
6825         u8  *pd;
6826         u32 i, offset, len, val, b_offset, b_count;
6827
6828         offset = eeprom->offset;
6829         len = eeprom->len;
6830         eeprom->len = 0;
6831
6832         eeprom->magic = TG3_EEPROM_MAGIC;
6833
6834         if (offset & 3) {
6835                 /* adjustments to start on required 4 byte boundary */
6836                 b_offset = offset & 3;
6837                 b_count = 4 - b_offset;
6838                 if (b_count > len) {
6839                         /* i.e. offset=1 len=2 */
6840                         b_count = len;
6841                 }
6842                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6843                 if (ret)
6844                         return ret;
6845                 val = cpu_to_le32(val);
6846                 memcpy(data, ((char*)&val) + b_offset, b_count);
6847                 len -= b_count;
6848                 offset += b_count;
6849                 eeprom->len += b_count;
6850         }
6851
6852         /* read bytes up to the last 4 byte boundary */
6853         pd = &data[eeprom->len];
6854         for (i = 0; i < (len - (len & 3)); i += 4) {
6855                 ret = tg3_nvram_read(tp, offset + i, &val);
6856                 if (ret) {
6857                         eeprom->len += i;
6858                         return ret;
6859                 }
6860                 val = cpu_to_le32(val);
6861                 memcpy(pd + i, &val, 4);
6862         }
6863         eeprom->len += i;
6864
6865         if (len & 3) {
6866                 /* read last bytes not ending on 4 byte boundary */
6867                 pd = &data[eeprom->len];
6868                 b_count = len & 3;
6869                 b_offset = offset + len - b_count;
6870                 ret = tg3_nvram_read(tp, b_offset, &val);
6871                 if (ret)
6872                         return ret;
6873                 val = cpu_to_le32(val);
6874                 memcpy(pd, ((char*)&val), b_count);
6875                 eeprom->len += b_count;
6876         }
6877         return 0;
6878 }
6879
6880 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6881
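/* Writes go through the same word granularity: if the request does not
 * start and end on a 4 byte boundary, the bounding words are read
 * first and merged with the user data in a temporary bounce buffer so
 * that only whole, aligned words are handed to tg3_nvram_write_block().
 */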
6882 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6883 {
6884         struct tg3 *tp = netdev_priv(dev);
6885         int ret;
6886         u32 offset, len, b_offset, odd_len, start, end;
6887         u8 *buf;
6888
6889         if (eeprom->magic != TG3_EEPROM_MAGIC)
6890                 return -EINVAL;
6891
6892         offset = eeprom->offset;
6893         len = eeprom->len;
6894
6895         if ((b_offset = (offset & 3))) {
6896                 /* adjustments to start on required 4 byte boundary */
6897                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6898                 if (ret)
6899                         return ret;
6900                 start = cpu_to_le32(start);
6901                 len += b_offset;
6902                 offset &= ~3;
6903                 if (len < 4)
6904                         len = 4;
6905         }
6906
6907         odd_len = 0;
6908         if (len & 3) {
6909                 /* adjustments to end on required 4 byte boundary */
6910                 odd_len = 1;
6911                 len = (len + 3) & ~3;
6912                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6913                 if (ret)
6914                         return ret;
6915                 end = cpu_to_le32(end);
6916         }
6917
6918         buf = data;
6919         if (b_offset || odd_len) {
6920                 buf = kmalloc(len, GFP_KERNEL);
6921                 if (!buf)
6922                         return -ENOMEM;
6923                 if (b_offset)
6924                         memcpy(buf, &start, 4);
6925                 if (odd_len)
6926                         memcpy(buf+len-4, &end, 4);
6927                 memcpy(buf + b_offset, data, eeprom->len);
6928         }
6929
6930         ret = tg3_nvram_write_block(tp, offset, len, buf);
6931
6932         if (buf != data)
6933                 kfree(buf);
6934
6935         return ret;
6936 }
6937
6938 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6939 {
6940         struct tg3 *tp = netdev_priv(dev);
6941   
6942         cmd->supported = (SUPPORTED_Autoneg);
6943
6944         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6945                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6946                                    SUPPORTED_1000baseT_Full);
6947
6948         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6949                 cmd->supported |= (SUPPORTED_100baseT_Half |
6950                                   SUPPORTED_100baseT_Full |
6951                                   SUPPORTED_10baseT_Half |
6952                                   SUPPORTED_10baseT_Full |
6953                                   SUPPORTED_MII);
6954         else
6955                 cmd->supported |= SUPPORTED_FIBRE;
6956   
6957         cmd->advertising = tp->link_config.advertising;
6958         if (netif_running(dev)) {
6959                 cmd->speed = tp->link_config.active_speed;
6960                 cmd->duplex = tp->link_config.active_duplex;
6961         }
6962         cmd->port = 0;
6963         cmd->phy_address = PHY_ADDR;
6964         cmd->transceiver = 0;
6965         cmd->autoneg = tp->link_config.autoneg;
6966         cmd->maxtxpkt = 0;
6967         cmd->maxrxpkt = 0;
6968         return 0;
6969 }
6970   
6971 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6972 {
6973         struct tg3 *tp = netdev_priv(dev);
6974   
6975         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6976                 /* These are the only valid advertisement bits allowed.  */
6977                 if (cmd->autoneg == AUTONEG_ENABLE &&
6978                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6979                                           ADVERTISED_1000baseT_Full |
6980                                           ADVERTISED_Autoneg |
6981                                           ADVERTISED_FIBRE)))
6982                         return -EINVAL;
6983         }
6984
6985         tg3_full_lock(tp, 0);
6986
6987         tp->link_config.autoneg = cmd->autoneg;
6988         if (cmd->autoneg == AUTONEG_ENABLE) {
6989                 tp->link_config.advertising = cmd->advertising;
6990                 tp->link_config.speed = SPEED_INVALID;
6991                 tp->link_config.duplex = DUPLEX_INVALID;
6992         } else {
6993                 tp->link_config.advertising = 0;
6994                 tp->link_config.speed = cmd->speed;
6995                 tp->link_config.duplex = cmd->duplex;
6996         }
6997   
6998         if (netif_running(dev))
6999                 tg3_setup_phy(tp, 1);
7000
7001         tg3_full_unlock(tp);
7002   
7003         return 0;
7004 }
7005   
7006 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7007 {
7008         struct tg3 *tp = netdev_priv(dev);
7009   
7010         strcpy(info->driver, DRV_MODULE_NAME);
7011         strcpy(info->version, DRV_MODULE_VERSION);
7012         strcpy(info->bus_info, pci_name(tp->pdev));
7013 }
7014   
7015 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7016 {
7017         struct tg3 *tp = netdev_priv(dev);
7018   
7019         wol->supported = WAKE_MAGIC;
7020         wol->wolopts = 0;
7021         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7022                 wol->wolopts = WAKE_MAGIC;
7023         memset(&wol->sopass, 0, sizeof(wol->sopass));
7024 }
7025   
7026 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7027 {
7028         struct tg3 *tp = netdev_priv(dev);
7029   
7030         if (wol->wolopts & ~WAKE_MAGIC)
7031                 return -EINVAL;
7032         if ((wol->wolopts & WAKE_MAGIC) &&
7033             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7034             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7035                 return -EINVAL;
7036   
7037         spin_lock_bh(&tp->lock);
7038         if (wol->wolopts & WAKE_MAGIC)
7039                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7040         else
7041                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7042         spin_unlock_bh(&tp->lock);
7043   
7044         return 0;
7045 }
7046   
7047 static u32 tg3_get_msglevel(struct net_device *dev)
7048 {
7049         struct tg3 *tp = netdev_priv(dev);
7050         return tp->msg_enable;
7051 }
7052   
7053 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7054 {
7055         struct tg3 *tp = netdev_priv(dev);
7056         tp->msg_enable = value;
7057 }
7058   
7059 #if TG3_TSO_SUPPORT != 0
7060 static int tg3_set_tso(struct net_device *dev, u32 value)
7061 {
7062         struct tg3 *tp = netdev_priv(dev);
7063
7064         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7065                 if (value)
7066                         return -EINVAL;
7067                 return 0;
7068         }
7069         return ethtool_op_set_tso(dev, value);
7070 }
7071 #endif
7072   
7073 static int tg3_nway_reset(struct net_device *dev)
7074 {
7075         struct tg3 *tp = netdev_priv(dev);
7076         u32 bmcr;
7077         int r;
7078   
7079         if (!netif_running(dev))
7080                 return -EAGAIN;
7081
7082         spin_lock_bh(&tp->lock);
7083         r = -EINVAL;
7084         tg3_readphy(tp, MII_BMCR, &bmcr);
7085         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7086             (bmcr & BMCR_ANENABLE)) {
7087                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7088                 r = 0;
7089         }
7090         spin_unlock_bh(&tp->lock);
7091   
7092         return r;
7093 }
7094   
7095 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7096 {
7097         struct tg3 *tp = netdev_priv(dev);
7098   
7099         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7100         ering->rx_mini_max_pending = 0;
7101         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7102
7103         ering->rx_pending = tp->rx_pending;
7104         ering->rx_mini_pending = 0;
7105         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7106         ering->tx_pending = tp->tx_pending;
7107 }
7108   
7109 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7110 {
7111         struct tg3 *tp = netdev_priv(dev);
7112         int irq_sync = 0;
7113   
7114         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7115             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7116             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7117                 return -EINVAL;
7118   
7119         if (netif_running(dev)) {
7120                 tg3_netif_stop(tp);
7121                 irq_sync = 1;
7122         }
7123
7124         tg3_full_lock(tp, irq_sync);
7125   
7126         tp->rx_pending = ering->rx_pending;
7127
7128         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7129             tp->rx_pending > 63)
7130                 tp->rx_pending = 63;
7131         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7132         tp->tx_pending = ering->tx_pending;
7133
7134         if (netif_running(dev)) {
7135                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7136                 tg3_init_hw(tp);
7137                 tg3_netif_start(tp);
7138         }
7139
7140         tg3_full_unlock(tp);
7141   
7142         return 0;
7143 }
7144   
7145 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7146 {
7147         struct tg3 *tp = netdev_priv(dev);
7148   
7149         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7150         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7151         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7152 }
7153   
7154 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7155 {
7156         struct tg3 *tp = netdev_priv(dev);
7157         int irq_sync = 0;
7158   
7159         if (netif_running(dev)) {
7160                 tg3_netif_stop(tp);
7161                 irq_sync = 1;
7162         }
7163
7164         tg3_full_lock(tp, irq_sync);
7165
7166         if (epause->autoneg)
7167                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7168         else
7169                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7170         if (epause->rx_pause)
7171                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7172         else
7173                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7174         if (epause->tx_pause)
7175                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7176         else
7177                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7178
7179         if (netif_running(dev)) {
7180                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7181                 tg3_init_hw(tp);
7182                 tg3_netif_start(tp);
7183         }
7184
7185         tg3_full_unlock(tp);
7186   
7187         return 0;
7188 }
7189   
7190 static u32 tg3_get_rx_csum(struct net_device *dev)
7191 {
7192         struct tg3 *tp = netdev_priv(dev);
7193         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7194 }
7195   
7196 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7197 {
7198         struct tg3 *tp = netdev_priv(dev);
7199   
7200         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7201                 if (data != 0)
7202                         return -EINVAL;
7203                 return 0;
7204         }
7205   
7206         spin_lock_bh(&tp->lock);
7207         if (data)
7208                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7209         else
7210                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7211         spin_unlock_bh(&tp->lock);
7212   
7213         return 0;
7214 }
7215   
7216 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7217 {
7218         struct tg3 *tp = netdev_priv(dev);
7219   
7220         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7221                 if (data != 0)
7222                         return -EINVAL;
7223                 return 0;
7224         }
7225   
7226         if (data)
7227                 dev->features |= NETIF_F_IP_CSUM;
7228         else
7229                 dev->features &= ~NETIF_F_IP_CSUM;
7230
7231         return 0;
7232 }
7233
7234 static int tg3_get_stats_count(struct net_device *dev)
7235 {
7236         return TG3_NUM_STATS;
7237 }
7238
7239 static int tg3_get_test_count(struct net_device *dev)
7240 {
7241         return TG3_NUM_TEST;
7242 }
7243
7244 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7245 {
7246         switch (stringset) {
7247         case ETH_SS_STATS:
7248                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7249                 break;
7250         case ETH_SS_TEST:
7251                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7252                 break;
7253         default:
7254                 WARN_ON(1);     /* we need a WARN() */
7255                 break;
7256         }
7257 }
7258
7259 static void tg3_get_ethtool_stats(struct net_device *dev,
7260                                   struct ethtool_stats *estats, u64 *tmp_stats)
7261 {
7262         struct tg3 *tp = netdev_priv(dev);
7263         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7264 }
7265
7266 #define NVRAM_TEST_SIZE 0x100
7267
7268 static int tg3_test_nvram(struct tg3 *tp)
7269 {
7270         u32 *buf, csum;
7271         int i, j, err = 0;
7272
7273         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7274         if (buf == NULL)
7275                 return -ENOMEM;
7276
7277         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7278                 u32 val;
7279
7280                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7281                         break;
7282                 buf[j] = cpu_to_le32(val);
7283         }
7284         if (i < NVRAM_TEST_SIZE)
7285                 goto out;
7286
7287         err = -EIO;
7288         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7289                 goto out;
7290
7291         /* Bootstrap checksum at offset 0x10 */
7292         csum = calc_crc((unsigned char *) buf, 0x10);
7293         if (csum != cpu_to_le32(buf[0x10/4]))
7294                 goto out;
7295
7296         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7297         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7298         if (csum != cpu_to_le32(buf[0xfc/4]))
7299                 goto out;
7300
7301         err = 0;
7302
7303 out:
7304         kfree(buf);
7305         return err;
7306 }
7307
7308 #define TG3_SERDES_TIMEOUT_SEC  2
7309 #define TG3_COPPER_TIMEOUT_SEC  6
7310
7311 static int tg3_test_link(struct tg3 *tp)
7312 {
7313         int i, max;
7314
7315         if (!netif_running(tp->dev))
7316                 return -ENODEV;
7317
7318         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7319                 max = TG3_SERDES_TIMEOUT_SEC;
7320         else
7321                 max = TG3_COPPER_TIMEOUT_SEC;
7322
7323         for (i = 0; i < max; i++) {
7324                 if (netif_carrier_ok(tp->dev))
7325                         return 0;
7326
7327                 if (msleep_interruptible(1000))
7328                         break;
7329         }
7330
7331         return -EIO;
7332 }
7333
7334 /* Only test the commonly used registers */
7335 static int tg3_test_registers(struct tg3 *tp)
7336 {
7337         int i, is_5705;
7338         u32 offset, read_mask, write_mask, val, save_val, read_val;
7339         static struct {
7340                 u16 offset;
7341                 u16 flags;
7342 #define TG3_FL_5705     0x1
7343 #define TG3_FL_NOT_5705 0x2
7344 #define TG3_FL_NOT_5788 0x4
7345                 u32 read_mask;
7346                 u32 write_mask;
7347         } reg_tbl[] = {
7348                 /* MAC Control Registers */
7349                 { MAC_MODE, TG3_FL_NOT_5705,
7350                         0x00000000, 0x00ef6f8c },
7351                 { MAC_MODE, TG3_FL_5705,
7352                         0x00000000, 0x01ef6b8c },
7353                 { MAC_STATUS, TG3_FL_NOT_5705,
7354                         0x03800107, 0x00000000 },
7355                 { MAC_STATUS, TG3_FL_5705,
7356                         0x03800100, 0x00000000 },
7357                 { MAC_ADDR_0_HIGH, 0x0000,
7358                         0x00000000, 0x0000ffff },
7359                 { MAC_ADDR_0_LOW, 0x0000,
7360                         0x00000000, 0xffffffff },
7361                 { MAC_RX_MTU_SIZE, 0x0000,
7362                         0x00000000, 0x0000ffff },
7363                 { MAC_TX_MODE, 0x0000,
7364                         0x00000000, 0x00000070 },
7365                 { MAC_TX_LENGTHS, 0x0000,
7366                         0x00000000, 0x00003fff },
7367                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7368                         0x00000000, 0x000007fc },
7369                 { MAC_RX_MODE, TG3_FL_5705,
7370                         0x00000000, 0x000007dc },
7371                 { MAC_HASH_REG_0, 0x0000,
7372                         0x00000000, 0xffffffff },
7373                 { MAC_HASH_REG_1, 0x0000,
7374                         0x00000000, 0xffffffff },
7375                 { MAC_HASH_REG_2, 0x0000,
7376                         0x00000000, 0xffffffff },
7377                 { MAC_HASH_REG_3, 0x0000,
7378                         0x00000000, 0xffffffff },
7379
7380                 /* Receive Data and Receive BD Initiator Control Registers. */
7381                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7382                         0x00000000, 0xffffffff },
7383                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7384                         0x00000000, 0xffffffff },
7385                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7386                         0x00000000, 0x00000003 },
7387                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7388                         0x00000000, 0xffffffff },
7389                 { RCVDBDI_STD_BD+0, 0x0000,
7390                         0x00000000, 0xffffffff },
7391                 { RCVDBDI_STD_BD+4, 0x0000,
7392                         0x00000000, 0xffffffff },
7393                 { RCVDBDI_STD_BD+8, 0x0000,
7394                         0x00000000, 0xffff0002 },
7395                 { RCVDBDI_STD_BD+0xc, 0x0000,
7396                         0x00000000, 0xffffffff },
7397         
7398                 /* Receive BD Initiator Control Registers. */
7399                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7400                         0x00000000, 0xffffffff },
7401                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7402                         0x00000000, 0x000003ff },
7403                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7404                         0x00000000, 0xffffffff },
7405         
7406                 /* Host Coalescing Control Registers. */
7407                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7408                         0x00000000, 0x00000004 },
7409                 { HOSTCC_MODE, TG3_FL_5705,
7410                         0x00000000, 0x000000f6 },
7411                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7412                         0x00000000, 0xffffffff },
7413                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7414                         0x00000000, 0x000003ff },
7415                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7416                         0x00000000, 0xffffffff },
7417                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7418                         0x00000000, 0x000003ff },
7419                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7420                         0x00000000, 0xffffffff },
7421                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7422                         0x00000000, 0x000000ff },
7423                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7424                         0x00000000, 0xffffffff },
7425                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7426                         0x00000000, 0x000000ff },
7427                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7428                         0x00000000, 0xffffffff },
7429                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7430                         0x00000000, 0xffffffff },
7431                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7432                         0x00000000, 0xffffffff },
7433                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7434                         0x00000000, 0x000000ff },
7435                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7436                         0x00000000, 0xffffffff },
7437                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7438                         0x00000000, 0x000000ff },
7439                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7440                         0x00000000, 0xffffffff },
7441                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7442                         0x00000000, 0xffffffff },
7443                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7444                         0x00000000, 0xffffffff },
7445                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7446                         0x00000000, 0xffffffff },
7447                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7448                         0x00000000, 0xffffffff },
7449                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7450                         0xffffffff, 0x00000000 },
7451                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7452                         0xffffffff, 0x00000000 },
7453
7454                 /* Buffer Manager Control Registers. */
7455                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7456                         0x00000000, 0x007fff80 },
7457                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7458                         0x00000000, 0x007fffff },
7459                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7460                         0x00000000, 0x0000003f },
7461                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7462                         0x00000000, 0x000001ff },
7463                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7464                         0x00000000, 0x000001ff },
7465                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7466                         0xffffffff, 0x00000000 },
7467                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7468                         0xffffffff, 0x00000000 },
7469         
7470                 /* Mailbox Registers */
7471                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7472                         0x00000000, 0x000001ff },
7473                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7474                         0x00000000, 0x000001ff },
7475                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7476                         0x00000000, 0x000007ff },
7477                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7478                         0x00000000, 0x000001ff },
7479
7480                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7481         };
7482
7483         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7484                 is_5705 = 1;
7485         else
7486                 is_5705 = 0;
7487
7488         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7489                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7490                         continue;
7491
7492                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7493                         continue;
7494
7495                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7496                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7497                         continue;
7498
7499                 offset = (u32) reg_tbl[i].offset;
7500                 read_mask = reg_tbl[i].read_mask;
7501                 write_mask = reg_tbl[i].write_mask;
7502
7503                 /* Save the original register content */
7504                 save_val = tr32(offset);
7505
7506                 /* Determine the read-only value. */
7507                 read_val = save_val & read_mask;
7508
7509                 /* Write zero to the register, then make sure the read-only bits
7510                  * are not changed and the read/write bits are all zeros.
7511                  */
7512                 tw32(offset, 0);
7513
7514                 val = tr32(offset);
7515
7516                 /* Test the read-only and read/write bits. */
7517                 if (((val & read_mask) != read_val) || (val & write_mask))
7518                         goto out;
7519
7520                 /* Write ones to all the bits defined by RdMask and WrMask, then
7521                  * make sure the read-only bits are not changed and the
7522                  * read/write bits are all ones.
7523                  */
7524                 tw32(offset, read_mask | write_mask);
7525
7526                 val = tr32(offset);
7527
7528                 /* Test the read-only bits. */
7529                 if ((val & read_mask) != read_val)
7530                         goto out;
7531
7532                 /* Test the read/write bits. */
7533                 if ((val & write_mask) != write_mask)
7534                         goto out;
7535
7536                 tw32(offset, save_val);
7537         }
7538
7539         return 0;
7540
7541 out:
7542         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7543         tw32(offset, save_val);
7544         return -EIO;
7545 }
7546
7547 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7548 {
7549         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7550         int i;
7551         u32 j;
7552
7553         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7554                 for (j = 0; j < len; j += 4) {
7555                         u32 val;
7556
7557                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7558                         tg3_read_mem(tp, offset + j, &val);
7559                         if (val != test_pattern[i])
7560                                 return -EIO;
7561                 }
7562         }
7563         return 0;
7564 }
7565
7566 static int tg3_test_memory(struct tg3 *tp)
7567 {
7568         static struct mem_entry {
7569                 u32 offset;
7570                 u32 len;
7571         } mem_tbl_570x[] = {
7572                 { 0x00000000, 0x01000},
7573                 { 0x00002000, 0x1c000},
7574                 { 0xffffffff, 0x00000}
7575         }, mem_tbl_5705[] = {
7576                 { 0x00000100, 0x0000c},
7577                 { 0x00000200, 0x00008},
7578                 { 0x00000b50, 0x00400},
7579                 { 0x00004000, 0x00800},
7580                 { 0x00006000, 0x01000},
7581                 { 0x00008000, 0x02000},
7582                 { 0x00010000, 0x0e000},
7583                 { 0xffffffff, 0x00000}
7584         };
7585         struct mem_entry *mem_tbl;
7586         int err = 0;
7587         int i;
7588
7589         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7590                 mem_tbl = mem_tbl_5705;
7591         else
7592                 mem_tbl = mem_tbl_570x;
7593
7594         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7595                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7596                     mem_tbl[i].len)) != 0)
7597                         break;
7598         }
7599         
7600         return err;
7601 }
7602
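/* Internal loopback test: the MAC is put into GMII internal loopback
 * (MAC_MODE_PORT_INT_LPBACK), a single 1514-byte frame filled with a
 * counting byte pattern is queued on the send ring, and the status
 * block is polled until the TX consumer and RX producer indices
 * advance.  The received buffer is then compared byte for byte against
 * what was transmitted.
 */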
7603 static int tg3_test_loopback(struct tg3 *tp)
7604 {
7605         u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7606         u32 desc_idx;
7607         struct sk_buff *skb, *rx_skb;
7608         u8 *tx_data;
7609         dma_addr_t map;
7610         int num_pkts, tx_len, rx_len, i, err;
7611         struct tg3_rx_buffer_desc *desc;
7612
7613         if (!netif_running(tp->dev))
7614                 return -ENODEV;
7615
7616         err = -EIO;
7617
7618         tg3_abort_hw(tp, 1);
7619
7620         tg3_reset_hw(tp);
7621
7622         mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7623                    MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7624                    MAC_MODE_PORT_MODE_GMII;
7625         tw32(MAC_MODE, mac_mode);
7626
7627         tx_len = 1514;
7628         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
7629         tx_data = skb_put(skb, tx_len);
7630         memcpy(tx_data, tp->dev->dev_addr, 6);
7631         memset(tx_data + 6, 0x0, 8);
7632
7633         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7634
7635         for (i = 14; i < tx_len; i++)
7636                 tx_data[i] = (u8) (i & 0xff);
7637
7638         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7639
7640         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7641              HOSTCC_MODE_NOW);
7642
7643         udelay(10);
7644
7645         rx_start_idx = tp->hw_status->idx[0].rx_producer;
7646
7647         send_idx = 0;
7648         num_pkts = 0;
7649
7650         tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7651
7652         send_idx++;
7653         num_pkts++;
7654
7655         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7656         tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7657
7658         udelay(10);
7659
7660         for (i = 0; i < 10; i++) {
7661                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7662                        HOSTCC_MODE_NOW);
7663
7664                 udelay(10);
7665
7666                 tx_idx = tp->hw_status->idx[0].tx_consumer;
7667                 rx_idx = tp->hw_status->idx[0].rx_producer;
7668                 if ((tx_idx == send_idx) &&
7669                     (rx_idx == (rx_start_idx + num_pkts)))
7670                         break;
7671         }
7672
7673         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7674         dev_kfree_skb(skb);
7675
7676         if (tx_idx != send_idx)
7677                 goto out;
7678
7679         if (rx_idx != rx_start_idx + num_pkts)
7680                 goto out;
7681
7682         desc = &tp->rx_rcb[rx_start_idx];
7683         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7684         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7685         if (opaque_key != RXD_OPAQUE_RING_STD)
7686                 goto out;
7687
7688         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7689             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7690                 goto out;
7691
7692         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7693         if (rx_len != tx_len)
7694                 goto out;
7695
7696         rx_skb = tp->rx_std_buffers[desc_idx].skb;
7697
7698         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7699         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7700
7701         for (i = 14; i < tx_len; i++) {
7702                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7703                         goto out;
7704         }
7705         err = 0;
7706         
7707         /* tg3_free_rings will unmap and free the rx_skb */
7708 out:
7709         return err;
7710 }
7711
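/* ethtool self-test entry point.  data[] slots are: 0 NVRAM checksum,
 * 1 link, 2 registers, 3 internal memory, 4 loopback, 5 interrupt.
 * The register, memory, loopback and interrupt tests only run offline,
 * since they halt the chip and require a full tg3_init_hw() afterwards.
 */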
7712 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7713                           u64 *data)
7714 {
7715         struct tg3 *tp = netdev_priv(dev);
7716
7717         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7718
7719         if (tg3_test_nvram(tp) != 0) {
7720                 etest->flags |= ETH_TEST_FL_FAILED;
7721                 data[0] = 1;
7722         }
7723         if (tg3_test_link(tp) != 0) {
7724                 etest->flags |= ETH_TEST_FL_FAILED;
7725                 data[1] = 1;
7726         }
7727         if (etest->flags & ETH_TEST_FL_OFFLINE) {
7728                 int irq_sync = 0;
7729
7730                 if (netif_running(dev)) {
7731                         tg3_netif_stop(tp);
7732                         irq_sync = 1;
7733                 }
7734
7735                 tg3_full_lock(tp, irq_sync);
7736
7737                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7738                 tg3_nvram_lock(tp);
7739                 tg3_halt_cpu(tp, RX_CPU_BASE);
7740                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7741                         tg3_halt_cpu(tp, TX_CPU_BASE);
7742                 tg3_nvram_unlock(tp);
7743
7744                 if (tg3_test_registers(tp) != 0) {
7745                         etest->flags |= ETH_TEST_FL_FAILED;
7746                         data[2] = 1;
7747                 }
7748                 if (tg3_test_memory(tp) != 0) {
7749                         etest->flags |= ETH_TEST_FL_FAILED;
7750                         data[3] = 1;
7751                 }
7752                 if (tg3_test_loopback(tp) != 0) {
7753                         etest->flags |= ETH_TEST_FL_FAILED;
7754                         data[4] = 1;
7755                 }
7756
7757                 tg3_full_unlock(tp);
7758
7759                 if (tg3_test_interrupt(tp) != 0) {
7760                         etest->flags |= ETH_TEST_FL_FAILED;
7761                         data[5] = 1;
7762                 }
7763
7764                 tg3_full_lock(tp, 0);
7765
7766                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7767                 if (netif_running(dev)) {
7768                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7769                         tg3_init_hw(tp);
7770                         tg3_netif_start(tp);
7771                 }
7772
7773                 tg3_full_unlock(tp);
7774         }
7775 }
7776
7777 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7778 {
7779         struct mii_ioctl_data *data = if_mii(ifr);
7780         struct tg3 *tp = netdev_priv(dev);
7781         int err;
7782
7783         switch (cmd) {
7784         case SIOCGMIIPHY:
7785                 data->phy_id = PHY_ADDR;
7786
7787                 /* fallthru */
7788         case SIOCGMIIREG: {
7789                 u32 mii_regval;
7790
7791                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7792                         break;                  /* We have no PHY */
7793
7794                 spin_lock_bh(&tp->lock);
7795                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
7796                 spin_unlock_bh(&tp->lock);
7797
7798                 data->val_out = mii_regval;
7799
7800                 return err;
7801         }
7802
7803         case SIOCSMIIREG:
7804                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7805                         break;                  /* We have no PHY */
7806
7807                 if (!capable(CAP_NET_ADMIN))
7808                         return -EPERM;
7809
7810                 spin_lock_bh(&tp->lock);
7811                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
7812                 spin_unlock_bh(&tp->lock);
7813
7814                 return err;
7815
7816         default:
7817                 /* do nothing */
7818                 break;
7819         }
7820         return -EOPNOTSUPP;
7821 }
7822
7823 #if TG3_VLAN_TAG_USED
7824 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
7825 {
7826         struct tg3 *tp = netdev_priv(dev);
7827
7828         tg3_full_lock(tp, 0);
7829
7830         tp->vlgrp = grp;
7831
7832         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
7833         __tg3_set_rx_mode(dev);
7834
7835         tg3_full_unlock(tp);
7836 }
7837
7838 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
7839 {
7840         struct tg3 *tp = netdev_priv(dev);
7841
7842         tg3_full_lock(tp, 0);
7843         if (tp->vlgrp)
7844                 tp->vlgrp->vlan_devices[vid] = NULL;
7845         tg3_full_unlock(tp);
7846 }
7847 #endif
7848
7849 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7850 {
7851         struct tg3 *tp = netdev_priv(dev);
7852
7853         memcpy(ec, &tp->coal, sizeof(*ec));
7854         return 0;
7855 }
7856
7857 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
7858 {
7859         struct tg3 *tp = netdev_priv(dev);
7860         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
7861         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
7862
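             /* On 5705 and newer chips the per-interrupt tick limits and
              * the statistics block interval are not supported; their
              * maximums stay at zero so any non-zero request is rejected
              * by the range checks below.
              */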
7863         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7864                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
7865                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
7866                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
7867                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
7868         }
7869
7870         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
7871             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
7872             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
7873             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
7874             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
7875             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
7876             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
7877             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
7878             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
7879             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
7880                 return -EINVAL;
7881
7882         /* No rx interrupts will be generated if both are zero */
7883         if ((ec->rx_coalesce_usecs == 0) &&
7884             (ec->rx_max_coalesced_frames == 0))
7885                 return -EINVAL;
7886
7887         /* No tx interrupts will be generated if both are zero */
7888         if ((ec->tx_coalesce_usecs == 0) &&
7889             (ec->tx_max_coalesced_frames == 0))
7890                 return -EINVAL;
7891
7892         /* Only copy relevant parameters, ignore all others. */
7893         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
7894         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
7895         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
7896         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
7897         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
7898         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
7899         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
7900         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
7901         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
7902
7903         if (netif_running(dev)) {
7904                 tg3_full_lock(tp, 0);
7905                 __tg3_set_coalesce(tp, &tp->coal);
7906                 tg3_full_unlock(tp);
7907         }
7908         return 0;
7909 }
7910
7911 static struct ethtool_ops tg3_ethtool_ops = {
7912         .get_settings           = tg3_get_settings,
7913         .set_settings           = tg3_set_settings,
7914         .get_drvinfo            = tg3_get_drvinfo,
7915         .get_regs_len           = tg3_get_regs_len,
7916         .get_regs               = tg3_get_regs,
7917         .get_wol                = tg3_get_wol,
7918         .set_wol                = tg3_set_wol,
7919         .get_msglevel           = tg3_get_msglevel,
7920         .set_msglevel           = tg3_set_msglevel,
7921         .nway_reset             = tg3_nway_reset,
7922         .get_link               = ethtool_op_get_link,
7923         .get_eeprom_len         = tg3_get_eeprom_len,
7924         .get_eeprom             = tg3_get_eeprom,
7925         .set_eeprom             = tg3_set_eeprom,
7926         .get_ringparam          = tg3_get_ringparam,
7927         .set_ringparam          = tg3_set_ringparam,
7928         .get_pauseparam         = tg3_get_pauseparam,
7929         .set_pauseparam         = tg3_set_pauseparam,
7930         .get_rx_csum            = tg3_get_rx_csum,
7931         .set_rx_csum            = tg3_set_rx_csum,
7932         .get_tx_csum            = ethtool_op_get_tx_csum,
7933         .set_tx_csum            = tg3_set_tx_csum,
7934         .get_sg                 = ethtool_op_get_sg,
7935         .set_sg                 = ethtool_op_set_sg,
7936 #if TG3_TSO_SUPPORT != 0
7937         .get_tso                = ethtool_op_get_tso,
7938         .set_tso                = tg3_set_tso,
7939 #endif
7940         .self_test_count        = tg3_get_test_count,
7941         .self_test              = tg3_self_test,
7942         .get_strings            = tg3_get_strings,
7943         .get_stats_count        = tg3_get_stats_count,
7944         .get_ethtool_stats      = tg3_get_ethtool_stats,
7945         .get_coalesce           = tg3_get_coalesce,
7946         .set_coalesce           = tg3_set_coalesce,
7947 };
7948
7949 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
7950 {
7951         u32 cursize, val;
7952
7953         tp->nvram_size = EEPROM_CHIP_SIZE;
7954
7955         if (tg3_nvram_read(tp, 0, &val) != 0)
7956                 return;
7957
7958         if (swab32(val) != TG3_EEPROM_MAGIC)
7959                 return;
7960
7961         /*
7962          * Size the chip by reading offsets at increasing powers of two.
7963          * When we encounter our validation signature, we know the addressing
7964          * has wrapped around, and thus have our chip size.
7965          */
7966         cursize = 0x800;
7967
7968         while (cursize < tp->nvram_size) {
7969                 if (tg3_nvram_read(tp, cursize, &val) != 0)
7970                         return;
7971
7972                 if (swab32(val) == TG3_EEPROM_MAGIC)
7973                         break;
7974
7975                 cursize <<= 1;
7976         }
7977
7978         tp->nvram_size = cursize;
7979 }
7980                 
7981 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7982 {
7983         u32 val;
7984
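             /* The upper 16 bits of NVRAM word 0xf0 hold the device size
              * in kilobytes; fall back to 128KB if the word reads as zero.
              */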
7985         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7986                 if (val != 0) {
7987                         tp->nvram_size = (val >> 16) * 1024;
7988                         return;
7989                 }
7990         }
7991         tp->nvram_size = 0x20000;
7992 }
7993
7994 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7995 {
7996         u32 nvcfg1;
7997
7998         nvcfg1 = tr32(NVRAM_CFG1);
7999         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8000                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8001         }
8002         else {
8003                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8004                 tw32(NVRAM_CFG1, nvcfg1);
8005         }
8006
8007         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8008                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8009                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8010                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8011                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8012                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8013                                 break;
8014                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8015                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8016                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8017                                 break;
8018                         case FLASH_VENDOR_ATMEL_EEPROM:
8019                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8020                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8021                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8022                                 break;
8023                         case FLASH_VENDOR_ST:
8024                                 tp->nvram_jedecnum = JEDEC_ST;
8025                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8026                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8027                                 break;
8028                         case FLASH_VENDOR_SAIFUN:
8029                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8030                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8031                                 break;
8032                         case FLASH_VENDOR_SST_SMALL:
8033                         case FLASH_VENDOR_SST_LARGE:
8034                                 tp->nvram_jedecnum = JEDEC_SST;
8035                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8036                                 break;
8037                 }
8038         }
8039         else {
8040                 tp->nvram_jedecnum = JEDEC_ATMEL;
8041                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8042                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8043         }
8044 }
8045
8046 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8047 {
8048         u32 nvcfg1;
8049
8050         nvcfg1 = tr32(NVRAM_CFG1);
8051
8052         /* NVRAM protection for TPM */
8053         if (nvcfg1 & (1 << 27))
8054                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8055
8056         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8057                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8058                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8059                         tp->nvram_jedecnum = JEDEC_ATMEL;
8060                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8061                         break;
8062                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8063                         tp->nvram_jedecnum = JEDEC_ATMEL;
8064                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8065                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8066                         break;
8067                 case FLASH_5752VENDOR_ST_M45PE10:
8068                 case FLASH_5752VENDOR_ST_M45PE20:
8069                 case FLASH_5752VENDOR_ST_M45PE40:
8070                         tp->nvram_jedecnum = JEDEC_ST;
8071                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8072                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8073                         break;
8074         }
8075
8076         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8077                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8078                         case FLASH_5752PAGE_SIZE_256:
8079                                 tp->nvram_pagesize = 256;
8080                                 break;
8081                         case FLASH_5752PAGE_SIZE_512:
8082                                 tp->nvram_pagesize = 512;
8083                                 break;
8084                         case FLASH_5752PAGE_SIZE_1K:
8085                                 tp->nvram_pagesize = 1024;
8086                                 break;
8087                         case FLASH_5752PAGE_SIZE_2K:
8088                                 tp->nvram_pagesize = 2048;
8089                                 break;
8090                         case FLASH_5752PAGE_SIZE_4K:
8091                                 tp->nvram_pagesize = 4096;
8092                                 break;
8093                         case FLASH_5752PAGE_SIZE_264:
8094                                 tp->nvram_pagesize = 264;
8095                                 break;
8096                 }
8097         }
8098         else {
8099                 /* For eeprom, set pagesize to maximum eeprom size */
8100                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8101
8102                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8103                 tw32(NVRAM_CFG1, nvcfg1);
8104         }
8105 }
8106
8107 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8108 static void __devinit tg3_nvram_init(struct tg3 *tp)
8109 {
8110         int j;
8111
8112         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8113                 return;
8114
8115         tw32_f(GRC_EEPROM_ADDR,
8116              (EEPROM_ADDR_FSM_RESET |
8117               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8118                EEPROM_ADDR_CLKPERD_SHIFT)));
8119
8120         /* XXX schedule_timeout() ... */
8121         for (j = 0; j < 100; j++)
8122                 udelay(10);
8123
8124         /* Enable seeprom accesses. */
8125         tw32_f(GRC_LOCAL_CTRL,
8126              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8127         udelay(100);
8128
8129         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8130             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8131                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8132
8133                 tg3_enable_nvram_access(tp);
8134
8135                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8136                         tg3_get_5752_nvram_info(tp);
8137                 else
8138                         tg3_get_nvram_info(tp);
8139
8140                 tg3_get_nvram_size(tp);
8141
8142                 tg3_disable_nvram_access(tp);
8143
8144         } else {
8145                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8146
8147                 tg3_get_eeprom_size(tp);
8148         }
8149 }
8150
8151 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8152                                         u32 offset, u32 *val)
8153 {
8154         u32 tmp;
8155         int i;
8156
8157         if (offset > EEPROM_ADDR_ADDR_MASK ||
8158             (offset % 4) != 0)
8159                 return -EINVAL;
8160
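             /* Program the word address with the READ and START bits set,
              * then poll (up to ~1 second) for EEPROM_ADDR_COMPLETE before
              * picking up the result from GRC_EEPROM_DATA.
              */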
8161         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8162                                         EEPROM_ADDR_DEVID_MASK |
8163                                         EEPROM_ADDR_READ);
8164         tw32(GRC_EEPROM_ADDR,
8165              tmp |
8166              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8167              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8168               EEPROM_ADDR_ADDR_MASK) |
8169              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8170
8171         for (i = 0; i < 10000; i++) {
8172                 tmp = tr32(GRC_EEPROM_ADDR);
8173
8174                 if (tmp & EEPROM_ADDR_COMPLETE)
8175                         break;
8176                 udelay(100);
8177         }
8178         if (!(tmp & EEPROM_ADDR_COMPLETE))
8179                 return -EBUSY;
8180
8181         *val = tr32(GRC_EEPROM_DATA);
8182         return 0;
8183 }
8184
8185 #define NVRAM_CMD_TIMEOUT 10000
8186
8187 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8188 {
8189         int i;
8190
8191         tw32(NVRAM_CMD, nvram_cmd);
8192         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8193                 udelay(10);
8194                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8195                         udelay(10);
8196                         break;
8197                 }
8198         }
8199         if (i == NVRAM_CMD_TIMEOUT) {
8200                 return -EBUSY;
8201         }
8202         return 0;
8203 }
8204
8205 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8206 {
8207         int ret;
8208
8209         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8210                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8211                 return -EINVAL;
8212         }
8213
8214         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8215                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8216
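             /* Buffered Atmel (AT45DB) flash is page addressed: shift the
              * page index up to ATMEL_AT45DB0X1B_PAGE_POS and keep the
              * byte offset within the page in the low bits.
              */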
8217         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8218                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8219                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8220
8221                 offset = ((offset / tp->nvram_pagesize) <<
8222                           ATMEL_AT45DB0X1B_PAGE_POS) +
8223                         (offset % tp->nvram_pagesize);
8224         }
8225
8226         if (offset > NVRAM_ADDR_MSK)
8227                 return -EINVAL;
8228
8229         tg3_nvram_lock(tp);
8230
8231         tg3_enable_nvram_access(tp);
8232
8233         tw32(NVRAM_ADDR, offset);
8234         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8235                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8236
8237         if (ret == 0)
8238                 *val = swab32(tr32(NVRAM_RDDATA));
8239
8240         tg3_nvram_unlock(tp);
8241
8242         tg3_disable_nvram_access(tp);
8243
8244         return ret;
8245 }
8246
8247 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8248                                     u32 offset, u32 len, u8 *buf)
8249 {
8250         int i, j, rc = 0;
8251         u32 val;
8252
8253         for (i = 0; i < len; i += 4) {
8254                 u32 addr, data;
8255
8256                 addr = offset + i;
8257
8258                 memcpy(&data, buf + i, 4);
8259
8260                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8261
8262                 val = tr32(GRC_EEPROM_ADDR);
8263                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8264
8265                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8266                         EEPROM_ADDR_READ);
8267                 tw32(GRC_EEPROM_ADDR, val |
8268                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8269                         (addr & EEPROM_ADDR_ADDR_MASK) |
8270                         EEPROM_ADDR_START |
8271                         EEPROM_ADDR_WRITE);
8272                 
8273                 for (j = 0; j < 10000; j++) {
8274                         val = tr32(GRC_EEPROM_ADDR);
8275
8276                         if (val & EEPROM_ADDR_COMPLETE)
8277                                 break;
8278                         udelay(100);
8279                 }
8280                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8281                         rc = -EBUSY;
8282                         break;
8283                 }
8284         }
8285
8286         return rc;
8287 }
8288
8289 /* offset and length are dword aligned */
8290 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8291                 u8 *buf)
8292 {
8293         int ret = 0;
8294         u32 pagesize = tp->nvram_pagesize;
8295         u32 pagemask = pagesize - 1;
8296         u32 nvram_cmd;
8297         u8 *tmp;
8298
8299         tmp = kmalloc(pagesize, GFP_KERNEL);
8300         if (tmp == NULL)
8301                 return -ENOMEM;
8302
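             /* Unbuffered flash is erased a page at a time, so do a
              * read-modify-write: read the whole page, merge in the
              * caller's data, erase the page, then program it back
              * word by word.
              */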
8303         while (len) {
8304                 int j;
8305                 u32 phy_addr, page_off, size;
8306
8307                 phy_addr = offset & ~pagemask;
8308         
8309                 for (j = 0; j < pagesize; j += 4) {
8310                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8311                                                 (u32 *) (tmp + j))))
8312                                 break;
8313                 }
8314                 if (ret)
8315                         break;
8316
8317                 page_off = offset & pagemask;
8318                 size = pagesize;
8319                 if (len < size)
8320                         size = len;
8321
8322                 len -= size;
8323
8324                 memcpy(tmp + page_off, buf, size);
8325
8326                 offset = offset + (pagesize - page_off);
8327
8328                 tg3_enable_nvram_access(tp);
8329
8330                 /*
8331                  * Before we can erase the flash page, we need
8332                  * to issue a special "write enable" command.
8333                  */
8334                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8335
8336                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8337                         break;
8338
8339                 /* Erase the target page */
8340                 tw32(NVRAM_ADDR, phy_addr);
8341
8342                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8343                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8344
8345                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8346                         break;
8347
8348                 /* Issue another write enable to start the write. */
8349                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8350
8351                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8352                         break;
8353
8354                 for (j = 0; j < pagesize; j += 4) {
8355                         u32 data;
8356
8357                         data = *((u32 *) (tmp + j));
8358                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8359
8360                         tw32(NVRAM_ADDR, phy_addr + j);
8361
8362                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8363                                 NVRAM_CMD_WR;
8364
8365                         if (j == 0)
8366                                 nvram_cmd |= NVRAM_CMD_FIRST;
8367                         else if (j == (pagesize - 4))
8368                                 nvram_cmd |= NVRAM_CMD_LAST;
8369
8370                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8371                                 break;
8372                 }
8373                 if (ret)
8374                         break;
8375         }
8376
8377         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8378         tg3_nvram_exec_cmd(tp, nvram_cmd);
8379
8380         kfree(tmp);
8381
8382         return ret;
8383 }
8384
8385 /* offset and length are dword aligned */
8386 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8387                 u8 *buf)
8388 {
8389         int i, ret = 0;
8390
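             /* Program one word at a time.  The first word of the buffer
              * or of a page is tagged NVRAM_CMD_FIRST, the last word of a
              * page or of the buffer NVRAM_CMD_LAST, and ST parts get a
              * write-enable command ahead of each page.
              */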
8391         for (i = 0; i < len; i += 4, offset += 4) {
8392                 u32 data, page_off, phy_addr, nvram_cmd;
8393
8394                 memcpy(&data, buf + i, 4);
8395                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8396
8397                 page_off = offset % tp->nvram_pagesize;
8398
8399                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8400                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8401
8402                         phy_addr = ((offset / tp->nvram_pagesize) <<
8403                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8404                 }
8405                 else {
8406                         phy_addr = offset;
8407                 }
8408
8409                 tw32(NVRAM_ADDR, phy_addr);
8410
8411                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8412
8413                 if ((page_off == 0) || (i == 0))
8414                         nvram_cmd |= NVRAM_CMD_FIRST;
8415                 else if (page_off == (tp->nvram_pagesize - 4))
8416                         nvram_cmd |= NVRAM_CMD_LAST;
8417
8418                 if (i == (len - 4))
8419                         nvram_cmd |= NVRAM_CMD_LAST;
8420
8421                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8422                         (nvram_cmd & NVRAM_CMD_FIRST)) {
8423
8424                         if ((ret = tg3_nvram_exec_cmd(tp,
8425                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8426                                 NVRAM_CMD_DONE)))
8427
8428                                 break;
8429                 }
8430                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8431                         /* We always do complete word writes to eeprom. */
8432                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8433                 }
8434
8435                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8436                         break;
8437         }
8438         return ret;
8439 }
8440
8441 /* offset and length are dword aligned */
8442 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8443 {
8444         int ret;
8445
8446         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8447                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8448                 return -EINVAL;
8449         }
8450
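             /* On boards that use GPIO1 as eeprom write protect, deassert
              * it for the duration of the write; it is restored below.
              */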
8451         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8452                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8453                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8454                 udelay(40);
8455         }
8456
8457         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8458                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8459         }
8460         else {
8461                 u32 grc_mode;
8462
8463                 tg3_nvram_lock(tp);
8464
8465                 tg3_enable_nvram_access(tp);
8466                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8467                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8468                         tw32(NVRAM_WRITE1, 0x406);
8469
8470                 grc_mode = tr32(GRC_MODE);
8471                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8472
8473                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8474                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8475
8476                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8477                                 buf);
8478                 }
8479                 else {
8480                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8481                                 buf);
8482                 }
8483
8484                 grc_mode = tr32(GRC_MODE);
8485                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8486
8487                 tg3_disable_nvram_access(tp);
8488                 tg3_nvram_unlock(tp);
8489         }
8490
8491         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8492                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8493                 udelay(40);
8494         }
8495
8496         return ret;
8497 }
8498
8499 struct subsys_tbl_ent {
8500         u16 subsys_vendor, subsys_devid;
8501         u32 phy_id;
8502 };
8503
8504 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8505         /* Broadcom boards. */
8506         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8507         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8508         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8509         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8510         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8511         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8512         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8513         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8514         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8515         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8516         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8517
8518         /* 3com boards. */
8519         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8520         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8521         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8522         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8523         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8524
8525         /* DELL boards. */
8526         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8527         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8528         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8529         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8530
8531         /* Compaq boards. */
8532         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8533         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8534         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8535         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8536         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8537
8538         /* IBM boards. */
8539         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8540 };
8541
8542 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8543 {
8544         int i;
8545
8546         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8547                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8548                      tp->pdev->subsystem_vendor) &&
8549                     (subsys_id_to_phy_id[i].subsys_devid ==
8550                      tp->pdev->subsystem_device))
8551                         return &subsys_id_to_phy_id[i];
8552         }
8553         return NULL;
8554 }
8555
8556 /* Since this function may be called in D3-hot power state during
8557  * tg3_init_one(), only config cycles are allowed.
8558  */
8559 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8560 {
8561         u32 val;
8562
8563         /* Make sure register accesses (indirect or otherwise)
8564          * will function correctly.
8565          */
8566         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8567                                tp->misc_host_ctrl);
8568
8569         tp->phy_id = PHY_ID_INVALID;
8570         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8571
8572         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8573         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8574                 u32 nic_cfg, led_cfg;
8575                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8576                 int eeprom_phy_serdes = 0;
8577
8578                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8579                 tp->nic_sram_data_cfg = nic_cfg;
8580
8581                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8582                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8583                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8584                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8585                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8586                     (ver > 0) && (ver < 0x100))
8587                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8588
8589                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8590                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8591                         eeprom_phy_serdes = 1;
8592
8593                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8594                 if (nic_phy_id != 0) {
8595                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8596                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8597
8598                         eeprom_phy_id  = (id1 >> 16) << 10;
8599                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
8600                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
8601                 } else
8602                         eeprom_phy_id = 0;
8603
8604                 tp->phy_id = eeprom_phy_id;
8605                 if (eeprom_phy_serdes)
8606                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8607
8608                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8609                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8610                                     SHASTA_EXT_LED_MODE_MASK);
8611                 else
8612                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8613
8614                 switch (led_cfg) {
8615                 default:
8616                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8617                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8618                         break;
8619
8620                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8621                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8622                         break;
8623
8624                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8625                         tp->led_ctrl = LED_CTRL_MODE_MAC;
8626
8627                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
8628                          * as happens with some older 5700/5701 bootcode.
8629                          */
8630                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8631                             ASIC_REV_5700 ||
8632                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
8633                             ASIC_REV_5701)
8634                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8635
8636                         break;
8637
8638                 case SHASTA_EXT_LED_SHARED:
8639                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
8640                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8641                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8642                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8643                                                  LED_CTRL_MODE_PHY_2);
8644                         break;
8645
8646                 case SHASTA_EXT_LED_MAC:
8647                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8648                         break;
8649
8650                 case SHASTA_EXT_LED_COMBO:
8651                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
8652                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8653                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8654                                                  LED_CTRL_MODE_PHY_2);
8655                         break;
8656
8657                 }
8658
8659                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8660                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8661                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8662                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8663
8664                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8665                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8666                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8667                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8668
8669                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8670                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8671                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8672                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8673                 }
8674                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8675                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8676
8677                 if (cfg2 & (1 << 17))
8678                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8679
8680                 /* Serdes signal pre-emphasis in register 0x590 is set by the
8681                  * bootcode if bit 18 is set. */
8682                 if (cfg2 & (1 << 18))
8683                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8684         }
8685 }
8686
8687 static int __devinit tg3_phy_probe(struct tg3 *tp)
8688 {
8689         u32 hw_phy_id_1, hw_phy_id_2;
8690         u32 hw_phy_id, hw_phy_id_masked;
8691         int err;
8692
8693         /* Reading the PHY ID register can conflict with ASF
8694          * firmware access to the PHY hardware.
8695          */
8696         err = 0;
8697         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8698                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8699         } else {
8700                 /* Now read the physical PHY_ID from the chip and verify
8701                  * that it is sane.  If it doesn't look good, we fall back
8702                  * to the PHY_ID found in the eeprom area and, failing that,
8703                  * to the hard-coded subsystem-ID table.
8704                  */
8705                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8706                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8707
8708                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
8709                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8710                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
8711
8712                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8713         }
8714
8715         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8716                 tp->phy_id = hw_phy_id;
8717                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8718                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8719         } else {
8720                 if (tp->phy_id != PHY_ID_INVALID) {
8721                         /* Do nothing, phy ID already set up in
8722                          * tg3_get_eeprom_hw_cfg().
8723                          */
8724                 } else {
8725                         struct subsys_tbl_ent *p;
8726
8727                         /* No eeprom signature?  Try the hardcoded
8728                          * subsys device table.
8729                          */
8730                         p = lookup_by_subsys(tp);
8731                         if (!p)
8732                                 return -ENODEV;
8733
8734                         tp->phy_id = p->phy_id;
8735                         if (!tp->phy_id ||
8736                             tp->phy_id == PHY_ID_BCM8002)
8737                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8738                 }
8739         }
8740
8741         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8742             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8743                 u32 bmsr, adv_reg, tg3_ctrl;
8744
8745                 tg3_readphy(tp, MII_BMSR, &bmsr);
8746                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
8747                     (bmsr & BMSR_LSTATUS))
8748                         goto skip_phy_reset;
8749                     
8750                 err = tg3_phy_reset(tp);
8751                 if (err)
8752                         return err;
8753
8754                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
8755                            ADVERTISE_100HALF | ADVERTISE_100FULL |
8756                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
8757                 tg3_ctrl = 0;
8758                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
8759                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
8760                                     MII_TG3_CTRL_ADV_1000_FULL);
8761                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8762                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
8763                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
8764                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
8765                 }
8766
8767                 if (!tg3_copper_is_advertising_all(tp)) {
8768                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8769
8770                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8771                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8772
8773                         tg3_writephy(tp, MII_BMCR,
8774                                      BMCR_ANENABLE | BMCR_ANRESTART);
8775                 }
8776                 tg3_phy_set_wirespeed(tp);
8777
8778                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
8779                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8780                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
8781         }
8782
8783 skip_phy_reset:
8784         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8785                 err = tg3_init_5401phy_dsp(tp);
8786                 if (err)
8787                         return err;
8788         }
8789
8790         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
8791                 err = tg3_init_5401phy_dsp(tp);
8792         }
8793
8794         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8795                 tp->link_config.advertising =
8796                         (ADVERTISED_1000baseT_Half |
8797                          ADVERTISED_1000baseT_Full |
8798                          ADVERTISED_Autoneg |
8799                          ADVERTISED_FIBRE);
8800         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8801                 tp->link_config.advertising &=
8802                         ~(ADVERTISED_1000baseT_Half |
8803                           ADVERTISED_1000baseT_Full);
8804
8805         return err;
8806 }
8807
8808 static void __devinit tg3_read_partno(struct tg3 *tp)
8809 {
8810         unsigned char vpd_data[256];
8811         int i;
8812
8813         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8814                 /* Sun decided not to put the necessary bits in the
8815                  * NVRAM of their onboard tg3 parts :(
8816                  */
8817                 strcpy(tp->board_part_number, "Sun 570X");
8818                 return;
8819         }
8820
8821         for (i = 0; i < 256; i += 4) {
8822                 u32 tmp;
8823
8824                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
8825                         goto out_not_found;
8826
8827                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
8828                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
8829                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
8830                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
8831         }
8832
8833         /* Now parse and find the part number. */
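             /* Skip the identifier-string (0x82) and read/write (0x91)
              * resources; the part number is the "PN" keyword inside the
              * read-only (0x90) resource.
              */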
8834         for (i = 0; i < 256; ) {
8835                 unsigned char val = vpd_data[i];
8836                 int block_end;
8837
8838                 if (val == 0x82 || val == 0x91) {
8839                         i = (i + 3 +
8840                              (vpd_data[i + 1] +
8841                               (vpd_data[i + 2] << 8)));
8842                         continue;
8843                 }
8844
8845                 if (val != 0x90)
8846                         goto out_not_found;
8847
8848                 block_end = (i + 3 +
8849                              (vpd_data[i + 1] +
8850                               (vpd_data[i + 2] << 8)));
8851                 i += 3;
8852                 while (i < block_end) {
8853                         if (vpd_data[i + 0] == 'P' &&
8854                             vpd_data[i + 1] == 'N') {
8855                                 int partno_len = vpd_data[i + 2];
8856
8857                                 if (partno_len > 24)
8858                                         goto out_not_found;
8859
8860                                 memcpy(tp->board_part_number,
8861                                        &vpd_data[i + 3],
8862                                        partno_len);
8863
8864                                 /* Success. */
8865                                 return;
8866                         }
                             /* Each keyword is 2 bytes of name and 1 byte
                              * of length followed by the data; step past
                              * it so a block without a "PN" entry cannot
                              * loop forever.
                              */
                             i += 3 + vpd_data[i + 2];
8867                 }
8868
8869                 /* Part number not found. */
8870                 goto out_not_found;
8871         }
8872
8873 out_not_found:
8874         strcpy(tp->board_part_number, "none");
8875 }
8876
8877 #ifdef CONFIG_SPARC64
8878 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
8879 {
8880         struct pci_dev *pdev = tp->pdev;
8881         struct pcidev_cookie *pcp = pdev->sysdata;
8882
8883         if (pcp != NULL) {
8884                 int node = pcp->prom_node;
8885                 u32 venid;
8886                 int err;
8887
8888                 err = prom_getproperty(node, "subsystem-vendor-id",
8889                                        (char *) &venid, sizeof(venid));
8890                 if (err == 0 || err == -1)
8891                         return 0;
8892                 if (venid == PCI_VENDOR_ID_SUN)
8893                         return 1;
8894         }
8895         return 0;
8896 }
8897 #endif
8898
8899 static int __devinit tg3_get_invariants(struct tg3 *tp)
8900 {
8901         static struct pci_device_id write_reorder_chipsets[] = {
8902                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8903                              PCI_DEVICE_ID_INTEL_82801AA_8) },
8904                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8905                              PCI_DEVICE_ID_INTEL_82801AB_8) },
8906                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8907                              PCI_DEVICE_ID_INTEL_82801BA_11) },
8908                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
8909                              PCI_DEVICE_ID_INTEL_82801BA_6) },
8910                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
8911                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
8912                 { },
8913         };
8914         u32 misc_ctrl_reg;
8915         u32 cacheline_sz_reg;
8916         u32 pci_state_reg, grc_misc_cfg;
8917         u32 val;
8918         u16 pci_cmd;
8919         int err;
8920
8921 #ifdef CONFIG_SPARC64
8922         if (tg3_is_sun_570X(tp))
8923                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
8924 #endif
8925
8926         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
8927          * reordering to the mailbox registers done by the host
8928          * controller can cause major troubles.  We read back from
8929          * every mailbox register write to force the writes to be
8930          * posted to the chip in order.
8931          */
8932         if (pci_dev_present(write_reorder_chipsets))
8933                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
8934
8935         /* Force memory write invalidate off.  If we leave it on,
8936          * then on 5700_BX chips we have to enable a workaround.
8937          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
8938          * to match the cacheline size.  The Broadcom driver has this
8939          * workaround but turns MWI off all the time and so never uses
8940          * it.  This seems to suggest that the workaround is insufficient.
8941          */
8942         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8943         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
8944         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8945
8946         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
8947          * has the register indirect write enable bit set before
8948          * we try to access any of the MMIO registers.  It is also
8949          * critical that the PCI-X hw workaround situation is decided
8950          * before that as well.
8951          */
8952         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8953                               &misc_ctrl_reg);
8954
8955         tp->pci_chip_rev_id = (misc_ctrl_reg >>
8956                                MISC_HOST_CTRL_CHIPREV_SHIFT);
8957
8958         /* Wrong chip ID in 5752 A0. This code can be removed later
8959          * as A0 is not in production.
8960          */
8961         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
8962                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
8963
8964         /* Find msi capability. */
8965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8966                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
8967
8968         /* Initialize misc host control in PCI block. */
8969         tp->misc_host_ctrl |= (misc_ctrl_reg &
8970                                MISC_HOST_CTRL_CHIPREV);
8971         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8972                                tp->misc_host_ctrl);
8973
8974         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
8975                               &cacheline_sz_reg);
8976
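             /* Break the TG3PCI_CACHELINESZ dword into its cache line
              * size, latency timer, header type and BIST byte fields.
              */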
8977         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
8978         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
8979         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
8980         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
8981
8982         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8983             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8984             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8985                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
8986
8987         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
8988             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
8989                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
8990
8991         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8992                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
8993
8994         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
8995             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
8996             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
8997                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
8998
8999         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9000                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9001
9002         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9003             tp->pci_lat_timer < 64) {
9004                 tp->pci_lat_timer = 64;
9005
9006                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9007                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9008                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9009                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9010
9011                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9012                                        cacheline_sz_reg);
9013         }
9014
9015         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9016                               &pci_state_reg);
9017
9018         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9019                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9020
9021                 /* If this is a 5700 BX chipset, and we are in PCI-X
9022                  * mode, enable register write workaround.
9023                  *
9024                  * The workaround is to use indirect register accesses
9025                  * for all chip writes not to mailbox registers.
9026                  */
9027                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9028                         u32 pm_reg;
9029                         u16 pci_cmd;
9030
9031                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9032
9033                          /* The chip can have its power management PCI config
9034                          * space registers clobbered due to this bug.
9035                          * So explicitly force the chip into D0 here.
9036                          */
9037                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9038                                               &pm_reg);
9039                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9040                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9041                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9042                                                pm_reg);
9043
9044                         /* Also, force SERR#/PERR# in PCI command. */
9045                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9046                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9047                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9048                 }
9049         }
9050
9051         /* Back to back register writes can cause problems on this chip,
9052          * the workaround is to read back all reg writes except those to
9053          * mailbox regs.  See tg3_write_indirect_reg32().
9054          *
9055          * PCI Express 5750_A0 rev chips need this workaround too.
9056          */
9057         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9058             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9059              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9060                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9061
9062         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9063                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9064         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9065                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9066
9067         /* Chip-specific fixup from Broadcom driver */
9068         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9069             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9070                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9071                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9072         }
9073
9074         /* Get eeprom hw config before calling tg3_set_power_state().
9075          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9076          * determined before calling tg3_set_power_state() so that
9077          * we know whether or not to switch out of Vaux power.
9078          * When the flag is set, it means that GPIO1 is used for eeprom
9079          * write protect and also implies that it is a LOM where GPIOs
9080          * are not used to switch power.
9081          */ 
9082         tg3_get_eeprom_hw_cfg(tp);
9083
9084         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9085          * GPIO1 driven high will bring 5700's external PHY out of reset.
9086          * It is also used as eeprom write protect on LOMs.
9087          */
9088         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9089         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9090             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9091                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9092                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9093         /* Unused GPIO3 must be driven as output on 5752 because there
9094          * are no pull-up resistors on unused GPIO pins.
9095          */
9096         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9097                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9098
9099         /* Force the chip into D0. */
9100         err = tg3_set_power_state(tp, 0);
9101         if (err) {
9102                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9103                        pci_name(tp->pdev));
9104                 return err;
9105         }
9106
9107         /* 5700 B0 chips do not support checksumming correctly due
9108          * to hardware bugs.
9109          */
9110         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9111                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9112
9113         /* Pseudo-header checksum is done by hardware logic and not
9114          * the offload processors, so make the chip do the pseudo-
9115          * header checksums on receive.  For transmit it is more
9116          * convenient to do the pseudo-header checksum in software
9117          * as Linux does that on transmit for us in all cases.
9118          */
9119         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9120         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9121
9122         /* Derive initial jumbo mode from MTU assigned in
9123          * ether_setup() via the alloc_etherdev() call
9124          */
9125         if (tp->dev->mtu > ETH_DATA_LEN &&
9126             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9127                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9128
9129         /* Determine WakeOnLan speed to use. */
9130         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9131             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9132             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9133             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9134                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9135         } else {
9136                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9137         }
9138
9139         /* A few boards don't want Ethernet@WireSpeed phy feature */
9140         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9141             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9142              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9143              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
9144                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9145
9146         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9147             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9148                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9149         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9150                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9151
9152         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9153                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9154
9155         tp->coalesce_mode = 0;
9156         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9157             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9158                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9159
9160         /* Initialize MAC MI mode, polling disabled. */
9161         tw32_f(MAC_MI_MODE, tp->mi_mode);
9162         udelay(80);
9163
9164         /* Initialize data/descriptor byte/word swapping. */
9165         val = tr32(GRC_MODE);
9166         val &= GRC_MODE_HOST_STACKUP;
9167         tw32(GRC_MODE, val | tp->grc_mode);
9168
9169         tg3_switch_clocks(tp);
9170
9171         /* Clear this out for sanity. */
9172         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9173
9174         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9175                               &pci_state_reg);
9176         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9177             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9178                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9179
9180                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9181                     chiprevid == CHIPREV_ID_5701_B0 ||
9182                     chiprevid == CHIPREV_ID_5701_B2 ||
9183                     chiprevid == CHIPREV_ID_5701_B5) {
9184                         void __iomem *sram_base;
9185
9186                         /* Write some dummy words into the SRAM status block
9187                          * area and see whether they read back correctly.  If the
9188                          * read-back value is bad, force-enable the PCI-X workaround.
9189                          */
9190                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9191
9192                         writel(0x00000000, sram_base);
9193                         writel(0x00000000, sram_base + 4);
9194                         writel(0xffffffff, sram_base + 4);
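                        /* The final write of 0xffffffff to offset +4 must not
                         * disturb offset 0; if offset 0 no longer reads back
                         * as zero, target writes are being corrupted and the
                         * workaround is forced on.
                         */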
9195                         if (readl(sram_base) != 0x00000000)
9196                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9197                 }
9198         }
9199
9200         udelay(50);
9201         tg3_nvram_init(tp);
9202
9203         grc_misc_cfg = tr32(GRC_MISC_CFG);
9204         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9205
9206         /* Broadcom's driver says that CIOBE multisplit has a bug */
9207 #if 0
9208         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9209             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9210                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9211                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9212         }
9213 #endif
9214         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9215             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9216              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9217                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9218
9219         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9220             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9221                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9222         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9223                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9224                                       HOSTCC_MODE_CLRTICK_TXBD);
9225
9226                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9227                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9228                                        tp->misc_host_ctrl);
9229         }
9230
9231         /* these are limited to 10/100 only */
9232         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9233              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9234             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9235              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9236              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9237               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9238               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9239             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9240              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9241               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9242                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9243
9244         err = tg3_phy_probe(tp);
9245         if (err) {
9246                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9247                        pci_name(tp->pdev), err);
9248                 /* ... but do not return immediately ... */
9249         }
9250
9251         tg3_read_partno(tp);
9252
9253         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9254                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9255         } else {
9256                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9257                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9258                 else
9259                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9260         }
9261
9262         /* 5700 {AX,BX} chips have a broken status block link
9263          * change bit implementation, so we must use the
9264          * status register in those cases.
9265          */
9266         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9267                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9268         else
9269                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9270
9271         /* The led_ctrl is set during tg3_phy_probe, here we might
9272          * have to force the link status polling mechanism based
9273          * upon subsystem IDs.
9274          */
9275         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9276             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9277                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9278                                   TG3_FLAG_USE_LINKCHG_REG);
9279         }
9280
9281         /* For all SERDES we poll the MAC status register. */
9282         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9283                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9284         else
9285                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9286
9287         /* 5700 BX chips need to have their TX producer index mailboxes
9288          * written twice to workaround a bug.
9289          */
9290         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9291                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9292         else
9293                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9294
9295         /* It seems all chips can get confused if TX buffers
9296          * straddle the 4GB address boundary in some cases.
9297          */
9298         tp->dev->hard_start_xmit = tg3_start_xmit;
9299
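        /* A 2 byte rx_offset keeps the IP header 4-byte aligned; the 5701 in
         * PCI-X mode presumably cannot DMA to 2-byte-aligned buffers, so the
         * offset is dropped there.
         */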
9300         tp->rx_offset = 2;
9301         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9302             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9303                 tp->rx_offset = 0;
9304
9305         /* By default, disable wake-on-lan.  User can change this
9306          * using ETHTOOL_SWOL.
9307          */
9308         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9309
9310         return err;
9311 }
9312
9313 #ifdef CONFIG_SPARC64
9314 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9315 {
9316         struct net_device *dev = tp->dev;
9317         struct pci_dev *pdev = tp->pdev;
9318         struct pcidev_cookie *pcp = pdev->sysdata;
9319
9320         if (pcp != NULL) {
9321                 int node = pcp->prom_node;
9322
9323                 if (prom_getproplen(node, "local-mac-address") == 6) {
9324                         prom_getproperty(node, "local-mac-address",
9325                                          dev->dev_addr, 6);
9326                         return 0;
9327                 }
9328         }
9329         return -ENODEV;
9330 }
9331
9332 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9333 {
9334         struct net_device *dev = tp->dev;
9335
9336         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9337         return 0;
9338 }
9339 #endif
9340
9341 static int __devinit tg3_get_device_address(struct tg3 *tp)
9342 {
9343         struct net_device *dev = tp->dev;
9344         u32 hi, lo, mac_offset;
9345
9346 #ifdef CONFIG_SPARC64
9347         if (!tg3_get_macaddr_sparc(tp))
9348                 return 0;
9349 #endif
9350
9351         mac_offset = 0x7c;
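        /* Dual-MAC parts (5704/5780) keep the second function's address at
         * NVRAM offset 0xcc instead of 0x7c, selected via DUAL_MAC_CTRL_ID
         * below.
         */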
9352         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9353              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9354             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9355                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9356                         mac_offset = 0xcc;
9357                 if (tg3_nvram_lock(tp))
9358                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9359                 else
9360                         tg3_nvram_unlock(tp);
9361         }
9362
9363         /* First try to get it from MAC address mailbox. */
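        /* A value of 0x484b (ASCII "HK") in the upper 16 bits appears to be
         * the firmware's signature for a valid address in the mailbox.
         */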
9364         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9365         if ((hi >> 16) == 0x484b) {
9366                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9367                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9368
9369                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9370                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9371                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9372                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9373                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9374         }
9375         /* Next, try NVRAM. */
9376         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9377                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9378                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9379                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9380                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9381                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9382                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9383                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9384                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9385         }
9386         /* Finally just fetch it out of the MAC control regs. */
9387         else {
9388                 hi = tr32(MAC_ADDR_0_HIGH);
9389                 lo = tr32(MAC_ADDR_0_LOW);
9390
9391                 dev->dev_addr[5] = lo & 0xff;
9392                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9393                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9394                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9395                 dev->dev_addr[1] = hi & 0xff;
9396                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9397         }
9398
9399         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9400 #ifdef CONFIG_SPARC64
9401                 if (!tg3_get_default_macaddr_sparc(tp))
9402                         return 0;
9403 #endif
9404                 return -EINVAL;
9405         }
9406         return 0;
9407 }
9408
9409 #define BOUNDARY_SINGLE_CACHELINE       1
9410 #define BOUNDARY_MULTI_CACHELINE        2
9411
9412 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9413 {
9414         int cacheline_size;
9415         u8 byte;
9416         int goal;
9417
9418         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9419         if (byte == 0)
9420                 cacheline_size = 1024;
9421         else
9422                 cacheline_size = (int) byte * 4;
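        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
         * multiply by 4.  A value of zero (not programmed) is treated as the
         * largest supported boundary, 1024 bytes.
         */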
9423
9424         /* On 5703 and later chips, the boundary bits have no
9425          * effect.
9426          */
9427         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9428             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9429             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9430                 goto out;
9431
9432 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9433         goal = BOUNDARY_MULTI_CACHELINE;
9434 #else
9435 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9436         goal = BOUNDARY_SINGLE_CACHELINE;
9437 #else
9438         goal = 0;
9439 #endif
9440 #endif
9441
9442         if (!goal)
9443                 goto out;
9444
9445         /* PCI controllers on most RISC systems tend to disconnect
9446          * when a device tries to burst across a cache-line boundary.
9447          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9448          *
9449          * Unfortunately, for PCI-E there are only limited
9450          * write-side controls for this, and thus for reads
9451          * we will still get the disconnects.  We'll also waste
9452          * these PCI cycles for both read and write for chips
9453          * other than 5700 and 5701 which do not implement the
9454          * boundary bits.
9455          */
9456         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9457             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9458                 switch (cacheline_size) {
9459                 case 16:
9460                 case 32:
9461                 case 64:
9462                 case 128:
9463                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9464                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9465                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9466                         } else {
9467                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9468                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9469                         }
9470                         break;
9471
9472                 case 256:
9473                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9474                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9475                         break;
9476
9477                 default:
9478                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9479                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9480                         break;
9481                 }
9482         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9483                 switch (cacheline_size) {
9484                 case 16:
9485                 case 32:
9486                 case 64:
9487                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9488                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9489                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9490                                 break;
9491                         }
9492                         /* fallthrough */
9493                 case 128:
9494                 default:
9495                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9496                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9497                         break;
9498                 }
9499         } else {
9500                 switch (cacheline_size) {
9501                 case 16:
9502                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9503                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9504                                         DMA_RWCTRL_WRITE_BNDRY_16);
9505                                 break;
9506                         }
9507                         /* fallthrough */
9508                 case 32:
9509                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9510                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9511                                         DMA_RWCTRL_WRITE_BNDRY_32);
9512                                 break;
9513                         }
9514                         /* fallthrough */
9515                 case 64:
9516                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9517                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9518                                         DMA_RWCTRL_WRITE_BNDRY_64);
9519                                 break;
9520                         }
9521                         /* fallthrough */
9522                 case 128:
9523                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9524                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9525                                         DMA_RWCTRL_WRITE_BNDRY_128);
9526                                 break;
9527                         }
9528                         /* fallthrough */
9529                 case 256:
9530                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
9531                                 DMA_RWCTRL_WRITE_BNDRY_256);
9532                         break;
9533                 case 512:
9534                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
9535                                 DMA_RWCTRL_WRITE_BNDRY_512);
9536                         break;
9537                 case 1024:
9538                 default:
9539                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9540                                 DMA_RWCTRL_WRITE_BNDRY_1024);
9541                         break;
9542                 }
9543         }
9544
9545 out:
9546         return val;
9547 }
9548
9549 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
9550 {
9551         struct tg3_internal_buffer_desc test_desc;
9552         u32 sram_dma_descs;
9553         int i, ret;
9554
9555         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
9556
9557         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
9558         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
9559         tw32(RDMAC_STATUS, 0);
9560         tw32(WDMAC_STATUS, 0);
9561
9562         tw32(BUFMGR_MODE, 0);
9563         tw32(FTQ_RESET, 0);
9564
9565         test_desc.addr_hi = ((u64) buf_dma) >> 32;
9566         test_desc.addr_lo = buf_dma & 0xffffffff;
9567         test_desc.nic_mbuf = 0x00002100;
9568         test_desc.len = size;
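        /* The internal descriptor points the DMA engine at the host buffer
         * (addr_hi/addr_lo) and, via nic_mbuf, at NIC SRAM offset 0x2100 on
         * the chip side.
         */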
9569
9570         /*
9571          * The HP ZX1 was seeing test failures for 5701 cards running at 33MHz
9572          * the *second* time the tg3 driver was loaded after an
9573          * initial scan.
9574          *
9575          * Broadcom tells me:
9576          *   ...the DMA engine is connected to the GRC block and a DMA
9577          *   reset may affect the GRC block in some unpredictable way...
9578          *   The behavior of resets to individual blocks has not been tested.
9579          *
9580          * Broadcom noted the GRC reset will also reset all sub-components.
9581          */
9582         if (to_device) {
9583                 test_desc.cqid_sqid = (13 << 8) | 2;
9584
9585                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
9586                 udelay(40);
9587         } else {
9588                 test_desc.cqid_sqid = (16 << 8) | 7;
9589
9590                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
9591                 udelay(40);
9592         }
9593         test_desc.flags = 0x00000005;
9594
9595         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
9596                 u32 val;
9597
9598                 val = *(((u32 *)&test_desc) + i);
9599                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
9600                                        sram_dma_descs + (i * sizeof(u32)));
9601                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
9602         }
9603         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
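        /* The descriptor has been copied into NIC SRAM one word at a time
         * through the PCI memory window (base address + data registers in
         * config space); the window base is restored to zero afterwards.
         */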
9604
9605         if (to_device) {
9606                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
9607         } else {
9608                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
9609         }
9610
9611         ret = -ENODEV;
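        /* Poll the completion FIFO for up to ~4ms (40 iterations of 100us)
         * for the descriptor to come back.
         */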
9612         for (i = 0; i < 40; i++) {
9613                 u32 val;
9614
9615                 if (to_device)
9616                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
9617                 else
9618                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
9619                 if ((val & 0xffff) == sram_dma_descs) {
9620                         ret = 0;
9621                         break;
9622                 }
9623
9624                 udelay(100);
9625         }
9626
9627         return ret;
9628 }
9629
9630 #define TEST_BUFFER_SIZE        0x2000
9631
9632 static int __devinit tg3_test_dma(struct tg3 *tp)
9633 {
9634         dma_addr_t buf_dma;
9635         u32 *buf, saved_dma_rwctrl;
9636         int ret;
9637
9638         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
9639         if (!buf) {
9640                 ret = -ENOMEM;
9641                 goto out_nofree;
9642         }
9643
9644         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
9645                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
9646
9647         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
9648
9649         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9650                 /* DMA read watermark not used on PCIE */
9651                 tp->dma_rwctrl |= 0x00180000;
9652         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
9653                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
9654                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
9655                         tp->dma_rwctrl |= 0x003f0000;
9656                 else
9657                         tp->dma_rwctrl |= 0x003f000f;
9658         } else {
9659                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9660                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9661                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
9662
9663                         if (ccval == 0x6 || ccval == 0x7)
9664                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
9665
9666                         /* Set bit 23 to enable PCIX hw bug fix */
9667                         tp->dma_rwctrl |= 0x009f0000;
9668                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9669                         /* 5780 always in PCIX mode */
9670                         tp->dma_rwctrl |= 0x00144000;
9671                 } else {
9672                         tp->dma_rwctrl |= 0x001b000f;
9673                 }
9674         }
9675
9676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9677             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9678                 tp->dma_rwctrl &= 0xfffffff0;
9679
9680         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
9682                 /* Remove this if it causes problems for some boards. */
9683                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
9684
9685                 /* On 5700/5701 chips, we need to set this bit.
9686                  * Otherwise the chip will issue cacheline transactions
9687                  * to streamable DMA memory with not all the byte
9688                  * enables turned on.  This is an error on several
9689                  * RISC PCI controllers, in particular sparc64.
9690                  *
9691                  * On 5703/5704 chips, this bit has been reassigned
9692                  * a different meaning.  In particular, it is used
9693                  * on those chips to enable a PCI-X workaround.
9694                  */
9695                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
9696         }
9697
9698         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9699
9700 #if 0
9701         /* Unneeded, already done by tg3_get_invariants.  */
9702         tg3_switch_clocks(tp);
9703 #endif
9704
9705         ret = 0;
9706         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9707             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
9708                 goto out;
9709
9710         /* It is best to perform DMA test with maximum write burst size
9711          * to expose the 5700/5701 write DMA bug.
9712          */
9713         saved_dma_rwctrl = tp->dma_rwctrl;
9714         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9715         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9716
9717         while (1) {
9718                 u32 *p = buf, i;
9719
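                /* Fill the buffer with an incrementing word pattern so that
                 * any corruption on the DMA round trip is caught by the
                 * verification pass below.
                 */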
9720                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
9721                         p[i] = i;
9722
9723                 /* Send the buffer to the chip. */
9724                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
9725                 if (ret) {
9726                         printk(KERN_ERR "tg3_test_dma() write to device failed, err %d\n", ret);
9727                         break;
9728                 }
9729
9730 #if 0
9731                 /* validate data reached card RAM correctly. */
9732                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9733                         u32 val;
9734                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
9735                         if (le32_to_cpu(val) != p[i]) {
9736                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
9737                                 /* ret = -ENODEV here? */
9738                         }
9739                         p[i] = 0;
9740                 }
9741 #endif
9742                 /* Now read it back. */
9743                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
9744                 if (ret) {
9745                         printk(KERN_ERR "tg3_test_dma() read from device failed, err %d\n", ret);
9746
9747                         break;
9748                 }
9749
9750                 /* Verify it. */
9751                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9752                         if (p[i] == i)
9753                                 continue;
9754
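                        /* On the first mismatch, clamp the write boundary to
                         * 16 bytes and retry the whole test unless it is
                         * already clamped, in which case give up.
                         */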
9755                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9756                             DMA_RWCTRL_WRITE_BNDRY_16) {
9757                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9758                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9759                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9760                                 break;
9761                         } else {
9762                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
9763                                 ret = -ENODEV;
9764                                 goto out;
9765                         }
9766                 }
9767
9768                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
9769                         /* Success. */
9770                         ret = 0;
9771                         break;
9772                 }
9773         }
9774         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9775             DMA_RWCTRL_WRITE_BNDRY_16) {
9776                 static struct pci_device_id dma_wait_state_chipsets[] = {
9777                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
9778                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
9779                         { },
9780                 };
9781
9782                 /* DMA test passed without adjusting DMA boundary,
9783                  * now look for chipsets that are known to expose the
9784                  * DMA bug without failing the test.
9785                  */
9786                 if (pci_dev_present(dma_wait_state_chipsets)) {
9787                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9788                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
9789                 }
9790                 else
9791                         /* Safe to use the calculated DMA boundary. */
9792                         tp->dma_rwctrl = saved_dma_rwctrl;
9793
9794                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9795         }
9796
9797 out:
9798         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
9799 out_nofree:
9800         return ret;
9801 }
9802
9803 static void __devinit tg3_init_link_config(struct tg3 *tp)
9804 {
9805         tp->link_config.advertising =
9806                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
9807                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
9808                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
9809                  ADVERTISED_Autoneg | ADVERTISED_MII);
9810         tp->link_config.speed = SPEED_INVALID;
9811         tp->link_config.duplex = DUPLEX_INVALID;
9812         tp->link_config.autoneg = AUTONEG_ENABLE;
9813         netif_carrier_off(tp->dev);
9814         tp->link_config.active_speed = SPEED_INVALID;
9815         tp->link_config.active_duplex = DUPLEX_INVALID;
9816         tp->link_config.phy_is_low_power = 0;
9817         tp->link_config.orig_speed = SPEED_INVALID;
9818         tp->link_config.orig_duplex = DUPLEX_INVALID;
9819         tp->link_config.orig_autoneg = AUTONEG_INVALID;
9820 }
9821
9822 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
9823 {
9824         tp->bufmgr_config.mbuf_read_dma_low_water =
9825                 DEFAULT_MB_RDMA_LOW_WATER;
9826         tp->bufmgr_config.mbuf_mac_rx_low_water =
9827                 DEFAULT_MB_MACRX_LOW_WATER;
9828         tp->bufmgr_config.mbuf_high_water =
9829                 DEFAULT_MB_HIGH_WATER;
9830
9831         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
9832                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
9833         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
9834                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
9835         tp->bufmgr_config.mbuf_high_water_jumbo =
9836                 DEFAULT_MB_HIGH_WATER_JUMBO;
9837
9838         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
9839         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
9840 }
9841
9842 static char * __devinit tg3_phy_string(struct tg3 *tp)
9843 {
9844         switch (tp->phy_id & PHY_ID_MASK) {
9845         case PHY_ID_BCM5400:    return "5400";
9846         case PHY_ID_BCM5401:    return "5401";
9847         case PHY_ID_BCM5411:    return "5411";
9848         case PHY_ID_BCM5701:    return "5701";
9849         case PHY_ID_BCM5703:    return "5703";
9850         case PHY_ID_BCM5704:    return "5704";
9851         case PHY_ID_BCM5705:    return "5705";
9852         case PHY_ID_BCM5750:    return "5750";
9853         case PHY_ID_BCM5752:    return "5752";
9854         case PHY_ID_BCM5780:    return "5780";
9855         case PHY_ID_BCM8002:    return "8002/serdes";
9856         case 0:                 return "serdes";
9857         default:                return "unknown";
9858         }
9859 }
9860
9861 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9862 {
9863         struct pci_dev *peer;
9864         unsigned int func, devnr = tp->pdev->devfn & ~7;
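        /* The 5704 is a two-function device; the peer is whichever other
         * function shares this slot (devfn with the function bits masked
         * off).
         */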
9865
9866         for (func = 0; func < 8; func++) {
9867                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
9868                 if (peer && peer != tp->pdev)
9869                         break;
9870                 pci_dev_put(peer);
9871         }
9872         if (!peer || peer == tp->pdev)
9873                 BUG();
9874
9875         /*
9876          * We don't need to keep the refcount elevated; there's no way
9877          * to remove one half of this device without removing the other
9878          */
9879         pci_dev_put(peer);
9880
9881         return peer;
9882 }
9883
9884 static void __devinit tg3_init_coal(struct tg3 *tp)
9885 {
9886         struct ethtool_coalesce *ec = &tp->coal;
9887
9888         memset(ec, 0, sizeof(*ec));
9889         ec->cmd = ETHTOOL_GCOALESCE;
9890         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9891         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9892         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9893         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9894         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9895         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9896         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9897         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9898         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
9899
9900         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9901                                  HOSTCC_MODE_CLRTICK_TXBD)) {
9902                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9903                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9904                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9905                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9906         }
9907
9908         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9909                 ec->rx_coalesce_usecs_irq = 0;
9910                 ec->tx_coalesce_usecs_irq = 0;
9911                 ec->stats_block_coalesce_usecs = 0;
9912         }
9913 }
9914
9915 static int __devinit tg3_init_one(struct pci_dev *pdev,
9916                                   const struct pci_device_id *ent)
9917 {
9918         static int tg3_version_printed = 0;
9919         unsigned long tg3reg_base, tg3reg_len;
9920         struct net_device *dev;
9921         struct tg3 *tp;
9922         int i, err, pci_using_dac, pm_cap;
9923
9924         if (tg3_version_printed++ == 0)
9925                 printk(KERN_INFO "%s", version);
9926
9927         err = pci_enable_device(pdev);
9928         if (err) {
9929                 printk(KERN_ERR PFX "Cannot enable PCI device, "
9930                        "aborting.\n");
9931                 return err;
9932         }
9933
9934         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9935                 printk(KERN_ERR PFX "Cannot find proper PCI device "
9936                        "base address, aborting.\n");
9937                 err = -ENODEV;
9938                 goto err_out_disable_pdev;
9939         }
9940
9941         err = pci_request_regions(pdev, DRV_MODULE_NAME);
9942         if (err) {
9943                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
9944                        "aborting.\n");
9945                 goto err_out_disable_pdev;
9946         }
9947
9948         pci_set_master(pdev);
9949
9950         /* Find power-management capability. */
9951         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9952         if (pm_cap == 0) {
9953                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
9954                        "aborting.\n");
9955                 err = -EIO;
9956                 goto err_out_free_res;
9957         }
9958
9959         /* Configure DMA attributes. */
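        /* Try a full 64-bit DMA mask first and fall back to 32-bit
         * addressing if the platform cannot support it.
         */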
9960         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
9961         if (!err) {
9962                 pci_using_dac = 1;
9963                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
9964                 if (err < 0) {
9965                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
9966                                "for consistent allocations\n");
9967                         goto err_out_free_res;
9968                 }
9969         } else {
9970                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
9971                 if (err) {
9972                         printk(KERN_ERR PFX "No usable DMA configuration, "
9973                                "aborting.\n");
9974                         goto err_out_free_res;
9975                 }
9976                 pci_using_dac = 0;
9977         }
9978
9979         tg3reg_base = pci_resource_start(pdev, 0);
9980         tg3reg_len = pci_resource_len(pdev, 0);
9981
9982         dev = alloc_etherdev(sizeof(*tp));
9983         if (!dev) {
9984                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
9985                 err = -ENOMEM;
9986                 goto err_out_free_res;
9987         }
9988
9989         SET_MODULE_OWNER(dev);
9990         SET_NETDEV_DEV(dev, &pdev->dev);
9991
9992         if (pci_using_dac)
9993                 dev->features |= NETIF_F_HIGHDMA;
9994         dev->features |= NETIF_F_LLTX;
9995 #if TG3_VLAN_TAG_USED
9996         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
9997         dev->vlan_rx_register = tg3_vlan_rx_register;
9998         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
9999 #endif
10000
10001         tp = netdev_priv(dev);
10002         tp->pdev = pdev;
10003         tp->dev = dev;
10004         tp->pm_cap = pm_cap;
10005         tp->mac_mode = TG3_DEF_MAC_MODE;
10006         tp->rx_mode = TG3_DEF_RX_MODE;
10007         tp->tx_mode = TG3_DEF_TX_MODE;
10008         tp->mi_mode = MAC_MI_MODE_BASE;
10009         if (tg3_debug > 0)
10010                 tp->msg_enable = tg3_debug;
10011         else
10012                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10013
10014         /* The word/byte swap controls here control register access byte
10015          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10016          * setting below.
10017          */
10018         tp->misc_host_ctrl =
10019                 MISC_HOST_CTRL_MASK_PCI_INT |
10020                 MISC_HOST_CTRL_WORD_SWAP |
10021                 MISC_HOST_CTRL_INDIR_ACCESS |
10022                 MISC_HOST_CTRL_PCISTATE_RW;
10023
10024         /* The NONFRM (non-frame) byte/word swap controls take effect
10025          * on descriptor entries, anything which isn't packet data.
10026          *
10027          * The StrongARM chips on the board (one for tx, one for rx)
10028          * are running in big-endian mode.
10029          */
10030         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10031                         GRC_MODE_WSWAP_NONFRM_DATA);
10032 #ifdef __BIG_ENDIAN
10033         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10034 #endif
10035         spin_lock_init(&tp->lock);
10036         spin_lock_init(&tp->tx_lock);
10037         spin_lock_init(&tp->indirect_lock);
10038         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10039
10040         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10041         if (!tp->regs) {
10042                 printk(KERN_ERR PFX "Cannot map device registers, "
10043                        "aborting.\n");
10044                 err = -ENOMEM;
10045                 goto err_out_free_dev;
10046         }
10047
10048         tg3_init_link_config(tp);
10049
10050         tg3_init_bufmgr_config(tp);
10051
10052         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10053         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10054         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10055
10056         dev->open = tg3_open;
10057         dev->stop = tg3_close;
10058         dev->get_stats = tg3_get_stats;
10059         dev->set_multicast_list = tg3_set_rx_mode;
10060         dev->set_mac_address = tg3_set_mac_addr;
10061         dev->do_ioctl = tg3_ioctl;
10062         dev->tx_timeout = tg3_tx_timeout;
10063         dev->poll = tg3_poll;
10064         dev->ethtool_ops = &tg3_ethtool_ops;
10065         dev->weight = 64;
10066         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10067         dev->change_mtu = tg3_change_mtu;
10068         dev->irq = pdev->irq;
10069 #ifdef CONFIG_NET_POLL_CONTROLLER
10070         dev->poll_controller = tg3_poll_controller;
10071 #endif
10072
10073         err = tg3_get_invariants(tp);
10074         if (err) {
10075                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10076                        "aborting.\n");
10077                 goto err_out_iounmap;
10078         }
10079
10080         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10081                 tp->bufmgr_config.mbuf_read_dma_low_water =
10082                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10083                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10084                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10085                 tp->bufmgr_config.mbuf_high_water =
10086                         DEFAULT_MB_HIGH_WATER_5705;
10087         }
10088
10089 #if TG3_TSO_SUPPORT != 0
10090         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10091                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10092         }
10093         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10094             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10095             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10096             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10097                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10098         } else {
10099                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10100         }
10101
10102         /* TSO is off by default, user can enable using ethtool.  */
10103 #if 0
10104         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10105                 dev->features |= NETIF_F_TSO;
10106 #endif
10107
10108 #endif
10109
10110         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10111             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10112             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10113                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10114                 tp->rx_pending = 63;
10115         }
10116
10117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10118                 tp->pdev_peer = tg3_find_5704_peer(tp);
10119
10120         err = tg3_get_device_address(tp);
10121         if (err) {
10122                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10123                        "aborting.\n");
10124                 goto err_out_iounmap;
10125         }
10126
10127         /*
10128          * Reset the chip in case a UNDI or EFI driver did not shut down
10129          * DMA.  Otherwise the DMA self test will enable WDMAC and we'll
10130          * see (spurious) pending DMA on the PCI bus at that point.
10131          */
10132         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10133             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10134                 pci_save_state(tp->pdev);
10135                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10136                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10137         }
10138
10139         err = tg3_test_dma(tp);
10140         if (err) {
10141                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10142                 goto err_out_iounmap;
10143         }
10144
10145         /* Tigon3 can do ipv4 only... and some chips have buggy
10146          * checksumming.
10147          */
10148         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10149                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10150                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10151         } else
10152                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10153
10154         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10155                 dev->features &= ~NETIF_F_HIGHDMA;
10156
10157         /* flow control autonegotiation is default behavior */
10158         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10159
10160         tg3_init_coal(tp);
10161
10162         err = register_netdev(dev);
10163         if (err) {
10164                 printk(KERN_ERR PFX "Cannot register net device, "
10165                        "aborting.\n");
10166                 goto err_out_iounmap;
10167         }
10168
10169         pci_set_drvdata(pdev, dev);
10170
10171         /* Now that we have fully setup the chip, save away a snapshot
10172          * of the PCI config space.  We need to restore this after
10173          * GRC_MISC_CFG core clock resets and some resume events.
10174          */
10175         pci_save_state(tp->pdev);
10176
10177         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
10178                dev->name,
10179                tp->board_part_number,
10180                tp->pci_chip_rev_id,
10181                tg3_phy_string(tp),
10182                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
10183                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10184                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10185                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10186                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10187                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10188
10189         for (i = 0; i < 6; i++)
10190                 printk("%2.2x%c", dev->dev_addr[i],
10191                        i == 5 ? '\n' : ':');
10192
10193         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10194                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10195                "TSOcap[%d] \n",
10196                dev->name,
10197                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10198                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10199                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10200                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10201                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10202                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10203                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10204         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10205                dev->name, tp->dma_rwctrl);
10206
10207         return 0;
10208
10209 err_out_iounmap:
10210         iounmap(tp->regs);
10211
10212 err_out_free_dev:
10213         free_netdev(dev);
10214
10215 err_out_free_res:
10216         pci_release_regions(pdev);
10217
10218 err_out_disable_pdev:
10219         pci_disable_device(pdev);
10220         pci_set_drvdata(pdev, NULL);
10221         return err;
10222 }
10223
10224 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10225 {
10226         struct net_device *dev = pci_get_drvdata(pdev);
10227
10228         if (dev) {
10229                 struct tg3 *tp = netdev_priv(dev);
10230
10231                 unregister_netdev(dev);
10232                 iounmap(tp->regs);
10233                 free_netdev(dev);
10234                 pci_release_regions(pdev);
10235                 pci_disable_device(pdev);
10236                 pci_set_drvdata(pdev, NULL);
10237         }
10238 }
10239
10240 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10241 {
10242         struct net_device *dev = pci_get_drvdata(pdev);
10243         struct tg3 *tp = netdev_priv(dev);
10244         int err;
10245
10246         if (!netif_running(dev))
10247                 return 0;
10248
10249         tg3_netif_stop(tp);
10250
10251         del_timer_sync(&tp->timer);
10252
10253         tg3_full_lock(tp, 1);
10254         tg3_disable_ints(tp);
10255         tg3_full_unlock(tp);
10256
10257         netif_device_detach(dev);
10258
10259         tg3_full_lock(tp, 0);
10260         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10261         tg3_full_unlock(tp);
10262
10263         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
10264         if (err) {
10265                 tg3_full_lock(tp, 0);
10266
10267                 tg3_init_hw(tp);
10268
10269                 tp->timer.expires = jiffies + tp->timer_offset;
10270                 add_timer(&tp->timer);
10271
10272                 netif_device_attach(dev);
10273                 tg3_netif_start(tp);
10274
10275                 tg3_full_unlock(tp);
10276         }
10277
10278         return err;
10279 }
10280
10281 static int tg3_resume(struct pci_dev *pdev)
10282 {
10283         struct net_device *dev = pci_get_drvdata(pdev);
10284         struct tg3 *tp = netdev_priv(dev);
10285         int err;
10286
10287         if (!netif_running(dev))
10288                 return 0;
10289
10290         pci_restore_state(tp->pdev);
10291
10292         err = tg3_set_power_state(tp, 0);
10293         if (err)
10294                 return err;
10295
10296         netif_device_attach(dev);
10297
10298         tg3_full_lock(tp, 0);
10299
10300         tg3_init_hw(tp);
10301
10302         tp->timer.expires = jiffies + tp->timer_offset;
10303         add_timer(&tp->timer);
10304
10305         tg3_netif_start(tp);
10306
10307         tg3_full_unlock(tp);
10308
10309         return 0;
10310 }
10311
10312 static struct pci_driver tg3_driver = {
10313         .name           = DRV_MODULE_NAME,
10314         .id_table       = tg3_pci_tbl,
10315         .probe          = tg3_init_one,
10316         .remove         = __devexit_p(tg3_remove_one),
10317         .suspend        = tg3_suspend,
10318         .resume         = tg3_resume
10319 };
10320
10321 static int __init tg3_init(void)
10322 {
10323         return pci_module_init(&tg3_driver);
10324 }
10325
10326 static void __exit tg3_cleanup(void)
10327 {
10328         pci_unregister_driver(&tg3_driver);
10329 }
10330
10331 module_init(tg3_init);
10332 module_exit(tg3_cleanup);