/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <xiong_huang@attansic.com>
 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
 * Xinzhu  302, TAIWAN, REPUBLIC OF CHINA
 *
 * Chris Snook <csnook@redhat.com>
 * Jay Cliburn <jcliburn@gmail.com>
 *
 * This version is adapted from the Attansic reference driver for
 * inclusion in the Linux kernel.  It is currently under heavy development.
 * A very incomplete list of things that need to be dealt with:
 *
 * TODO:
 * Fix TSO; tx performance is horrible with TSO enabled.
 * Wake on LAN.
 * Add more ethtool functions, including set ring parameters.
 * Fix abstruse irq enable/disable condition described here:
 *      http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 *
 * NEEDS TESTING:
 * VLAN
 * multicast
 * promiscuous mode
 * interrupt coalescing
 * SMP torture testing
 */

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/irqreturn.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/dma-mapping.h>
#include <linux/net.h>
#include <linux/pm.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

#include "atl1.h"

#define DRIVER_VERSION "2.0.7"

char atl1_driver_name[] = "atl1";
static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver";
static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation.";
char atl1_driver_version[] = DRIVER_VERSION;

MODULE_AUTHOR
    ("Attansic Corporation <xiong_huang@attansic.com>, Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);

/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id atl1_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);

/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        /* PCI config space info */
        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

        hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

        adapter->wol = 0;
        adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
        adapter->ict = 50000;   /* 100ms */
        adapter->link_speed = SPEED_0;  /* hardware init */
        adapter->link_duplex = FULL_DUPLEX;

        hw->phy_configured = false;
        hw->preamble_len = 7;
        hw->ipgt = 0x60;
        hw->min_ifg = 0x50;
        hw->ipgr1 = 0x40;
        hw->ipgr2 = 0x60;
        hw->max_retry = 0xf;
        hw->lcol = 0x37;
        hw->jam_ipg = 7;
        hw->rfd_burst = 8;
        hw->rrd_burst = 8;
        hw->rfd_fetch_gap = 1;
        hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
        hw->rx_jumbo_lkah = 1;
        hw->rrd_ret_timer = 16;
        hw->tpd_burst = 4;
        hw->tpd_fetch_th = 16;
        hw->txf_burst = 0x100;
        hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
        hw->tpd_fetch_gap = 1;
        hw->rcb_value = atl1_rcb_64;
        hw->dma_ord = atl1_dma_ord_enh;
        hw->dmar_block = atl1_dma_req_256;
        hw->dmaw_block = atl1_dma_req_256;
        hw->cmb_rrd = 4;
        hw->cmb_tpd = 4;
        hw->cmb_rx_timer = 1;   /* about 2us */
        hw->cmb_tx_timer = 1;   /* about 2us */
        hw->smb_timer = 100000; /* about 200ms */

        atomic_set(&adapter->irq_sem, 0);
        spin_lock_init(&adapter->lock);
        spin_lock_init(&adapter->mb_lock);

        return 0;
}

/*
 * atl1_setup_ring_resources - allocate Tx / Rx descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_ring_header *ring_header = &adapter->ring_header;
        struct pci_dev *pdev = adapter->pdev;
        int size;
        u8 offset = 0;

        size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
        tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
        if (unlikely(!tpd_ring->buffer_info)) {
                printk(KERN_WARNING "%s: kzalloc failed, size = %d\n",
                        atl1_driver_name, size);
                goto err_nomem;
        }
        rfd_ring->buffer_info =
            (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);

        /* real ring DMA buffer */
        ring_header->size = size = sizeof(struct tx_packet_desc) *
                                        tpd_ring->count
            + sizeof(struct rx_free_desc) * rfd_ring->count
            + sizeof(struct rx_return_desc) * rrd_ring->count
            + sizeof(struct coals_msg_block)
            + sizeof(struct stats_msg_block)
            + 40;               /* "40: for 8 bytes align" huh? -- CHS */
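        /*
         * The 40 bytes of slack appear to cover worst-case alignment
         * padding for the five 8-byte-aligned sub-blocks laid out
         * below (TPD, RFD, RRD, CMB, SMB): up to 8 bytes each,
         * 5 * 8 = 40.
         */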

        ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
                                                &ring_header->dma);
        if (unlikely(!ring_header->desc)) {
                printk(KERN_WARNING
                        "%s: pci_alloc_consistent failed, size = %d\n",
                        atl1_driver_name, size);
                goto err_nomem;
        }

        memset(ring_header->desc, 0, ring_header->size);

        /* init TPD ring */
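        /*
         * Each sub-block below rounds its DMA address up to the next
         * 8-byte boundary, e.g. dma = 0x...1003 gives offset = 5 and
         * an aligned address of 0x...1008.
         */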
        tpd_ring->dma = ring_header->dma;
        offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
        tpd_ring->dma += offset;
        tpd_ring->desc = (u8 *) ring_header->desc + offset;
        tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
        atomic_set(&tpd_ring->next_to_use, 0);
        atomic_set(&tpd_ring->next_to_clean, 0);

        /* init RFD ring */
        rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
        offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
        rfd_ring->dma += offset;
        rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
        rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
        rfd_ring->next_to_clean = 0;
        /* rfd_ring->next_to_use = rfd_ring->count - 1; */
        atomic_set(&rfd_ring->next_to_use, 0);

        /* init RRD ring */
        rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
        offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
        rrd_ring->dma += offset;
        rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
        rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
        rrd_ring->next_to_use = 0;
        atomic_set(&rrd_ring->next_to_clean, 0);

        /* init CMB */
        adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
        offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
        adapter->cmb.dma += offset;
        adapter->cmb.cmb =
            (struct coals_msg_block *) ((u8 *) rrd_ring->desc +
                                   (rrd_ring->size + offset));

        /* init SMB */
        adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
        offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
        adapter->smb.dma += offset;
        adapter->smb.smb = (struct stats_msg_block *)
            ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset));

        return ATL1_SUCCESS;

err_nomem:
        kfree(tpd_ring->buffer_info);
        return -ENOMEM;
}

/*
 * atl1_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static void atl1_irq_enable(struct atl1_adapter *adapter)
{
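        /*
         * irq_sem is bumped by atl1_irq_disable() and dropped here;
         * the IMR is rewritten only when the decremented count is
         * nonzero.  See the "abstruse irq enable/disable condition"
         * item in the TODO list above.
         */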
        if (likely(!atomic_dec_and_test(&adapter->irq_sem)))
                iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
}

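/*
 * Reading PHY register 19 (the PHY interrupt status register, judging
 * by its use here) clears the latched PHY interrupt; the value read
 * is discarded.
 */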
static void atl1_clear_phy_int(struct atl1_adapter *adapter)
{
        u16 phy_data;
        unsigned long flags;

        spin_lock_irqsave(&adapter->lock, flags);
        atl1_read_phy_reg(&adapter->hw, 19, &phy_data);
        spin_unlock_irqrestore(&adapter->lock, flags);
}

static void atl1_inc_smb(struct atl1_adapter *adapter)
{
        struct stats_msg_block *smb = adapter->smb.smb;

        /* Fill out the OS statistics structure */
        adapter->soft_stats.rx_packets += smb->rx_ok;
        adapter->soft_stats.tx_packets += smb->tx_ok;
        adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
        adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
        adapter->soft_stats.multicast += smb->rx_mcast;
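        /*
         * Weight the collision counters: a double collision implies
         * two collisions, and an aborted transmit implies max_retry
         * collisions before the MAC gave up.
         */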
        adapter->soft_stats.collisions += (smb->tx_1_col +
                                           smb->tx_2_col * 2 +
                                           smb->tx_late_col +
                                           smb->tx_abort_col *
                                           adapter->hw.max_retry);

        /* Rx Errors */
        adapter->soft_stats.rx_errors += (smb->rx_frag +
                                          smb->rx_fcs_err +
                                          smb->rx_len_err +
                                          smb->rx_sz_ov +
                                          smb->rx_rxf_ov +
                                          smb->rx_rrd_ov + smb->rx_align_err);
        adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
        adapter->soft_stats.rx_length_errors += smb->rx_len_err;
        adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
        adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
        adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
                                                 smb->rx_rxf_ov);

        adapter->soft_stats.rx_pause += smb->rx_pause;
        adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
        adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

        /* Tx Errors */
        adapter->soft_stats.tx_errors += (smb->tx_late_col +
                                          smb->tx_abort_col +
                                          smb->tx_underrun + smb->tx_trunc);
        adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
        adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
        adapter->soft_stats.tx_window_errors += smb->tx_late_col;

        adapter->soft_stats.excecol += smb->tx_abort_col;
        adapter->soft_stats.deffer += smb->tx_defer;
        adapter->soft_stats.scc += smb->tx_1_col;
        adapter->soft_stats.mcc += smb->tx_2_col;
        adapter->soft_stats.latecol += smb->tx_late_col;
        adapter->soft_stats.tx_underun += smb->tx_underrun;
        adapter->soft_stats.tx_trunc += smb->tx_trunc;
        adapter->soft_stats.tx_pause += smb->tx_pause;

        adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
        adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
        adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
        adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
        adapter->net_stats.multicast = adapter->soft_stats.multicast;
        adapter->net_stats.collisions = adapter->soft_stats.collisions;
        adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
        adapter->net_stats.rx_over_errors =
            adapter->soft_stats.rx_missed_errors;
        adapter->net_stats.rx_length_errors =
            adapter->soft_stats.rx_length_errors;
        adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
        adapter->net_stats.rx_frame_errors =
            adapter->soft_stats.rx_frame_errors;
        adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
        adapter->net_stats.rx_missed_errors =
            adapter->soft_stats.rx_missed_errors;
        adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
        adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
        adapter->net_stats.tx_aborted_errors =
            adapter->soft_stats.tx_aborted_errors;
        adapter->net_stats.tx_window_errors =
            adapter->soft_stats.tx_window_errors;
        adapter->net_stats.tx_carrier_errors =
            adapter->soft_stats.tx_carrier_errors;
}

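/*
 * Three possible outcomes here: leave ip_summed as CHECKSUM_NONE
 * (error or non-IPv4 frame), mark CHECKSUM_UNNECESSARY when the
 * hardware verified both IP and L4 checksums, or hand the stack the
 * hardware's raw checksum via CHECKSUM_COMPLETE when the hardware
 * flagged a mismatch, so the stack can re-verify.
 */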
static void atl1_rx_checksum(struct atl1_adapter *adapter,
                                        struct rx_return_desc *rrd,
                                        struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
                                        ERR_FLAG_CODE | ERR_FLAG_OV)) {
                        adapter->hw_csum_err++;
                        printk(KERN_DEBUG "%s: rx checksum error\n",
                                atl1_driver_name);
                        return;
                }
        }

        /* not IPv4 */
        if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
                /* checksum is invalid, but it's not an IPv4 pkt, so ok */
                return;

        /* IPv4 packet */
        if (likely(!(rrd->err_flg &
                (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                adapter->hw_csum_good++;
                return;
        }

        /* IPv4, but hardware thinks its checksum is wrong */
        printk(KERN_DEBUG "%s: hw csum wrong pkt_flag:%x, err_flag:%x\n",
                atl1_driver_name, rrd->pkt_flg, rrd->err_flg);
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
        adapter->hw_csum_err++;
        return;
}

/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct page *page;
        unsigned long offset;
        struct atl1_buffer *buffer_info, *next_info;
        struct sk_buff *skb;
        u16 num_alloc = 0;
        u16 rfd_next_to_use, next_next;
        struct rx_free_desc *rfd_desc;

        next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
        if (++next_next == rfd_ring->count)
                next_next = 0;
        buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
        next_info = &rfd_ring->buffer_info[next_next];

        while (!buffer_info->alloced && !next_info->alloced) {
                if (buffer_info->skb) {
                        buffer_info->alloced = 1;
                        goto next;
                }

                rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

                skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
                if (unlikely(!skb)) {   /* Better luck next round */
                        adapter->net_stats.rx_dropped++;
                        break;
                }

                /*
                 * Make buffer alignment 2 beyond a 16 byte boundary;
                 * this will result in a 16 byte aligned IP header after
                 * the 14 byte MAC header is removed.
                 */
                skb_reserve(skb, NET_IP_ALIGN);
                skb->dev = netdev;

                buffer_info->alloced = 1;
                buffer_info->skb = skb;
                buffer_info->length = (u16) adapter->rx_buffer_len;
                page = virt_to_page(skb->data);
                offset = (unsigned long)skb->data & ~PAGE_MASK;
                buffer_info->dma = pci_map_page(pdev, page, offset,
                                                adapter->rx_buffer_len,
                                                PCI_DMA_FROMDEVICE);
                rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
                rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
                rfd_desc->coalese = 0;

next:
                rfd_next_to_use = next_next;
                if (unlikely(++next_next == rfd_ring->count))
                        next_next = 0;

                buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
                next_info = &rfd_ring->buffer_info[next_next];
                num_alloc++;
        }

        if (num_alloc) {
                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
        }
        return num_alloc;
}

static void atl1_intr_rx(struct atl1_adapter *adapter)
{
        int i, count;
        u16 length;
        u16 rrd_next_to_clean;
        u32 value;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_buffer *buffer_info;
        struct rx_return_desc *rrd;
        struct sk_buff *skb;

        count = 0;

        rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

        while (1) {
                rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
                i = 1;
                if (likely(rrd->xsz.valid)) {   /* packet valid */
chk_rrd:
                        /* check rrd status */
                        if (likely(rrd->num_buf == 1))
                                goto rrd_ok;

                        /* rrd seems to be bad */
                        if (unlikely(i-- > 0)) {
                                /* rrd may not be DMAed completely */
                                printk(KERN_DEBUG
                                        "%s: RRD may not be DMAed completely\n",
                                        atl1_driver_name);
                                udelay(1);
                                goto chk_rrd;
                        }
                        /* bad rrd */
                        printk(KERN_DEBUG "%s: bad RRD\n", atl1_driver_name);
                        /* see if we need to update the RFD index */
                        if (rrd->num_buf > 1) {
                                u16 num_buf;
                                num_buf =
                                    (rrd->xsz.xsum_sz.pkt_size +
                                     adapter->rx_buffer_len -
                                     1) / adapter->rx_buffer_len;
                                if (rrd->num_buf == num_buf) {
                                        /* clean alloc flag for bad rrd */
                                        while (rfd_ring->next_to_clean !=
                                               (rrd->buf_indx + num_buf)) {
                                                rfd_ring->buffer_info[rfd_ring->
                                                                      next_to_clean].alloced = 0;
                                                if (++rfd_ring->next_to_clean ==
                                                    rfd_ring->count) {
                                                        rfd_ring->
                                                            next_to_clean = 0;
                                                }
                                        }
                                }
                        }

                        /* update rrd */
                        rrd->xsz.valid = 0;
                        if (++rrd_next_to_clean == rrd_ring->count)
                                rrd_next_to_clean = 0;
                        count++;
                        continue;
                } else {        /* the current rrd has not been updated yet */

                        break;
                }
rrd_ok:
                /* clear the alloc flag of RFDs consumed ahead of this rrd */
                while (rfd_ring->next_to_clean != rrd->buf_indx) {
                        rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced =
                            0;
                        if (++rfd_ring->next_to_clean == rfd_ring->count)
                                rfd_ring->next_to_clean = 0;
                }

                buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
                if (++rfd_ring->next_to_clean == rfd_ring->count)
                        rfd_ring->next_to_clean = 0;

                /* update rrd next to clean */
                if (++rrd_next_to_clean == rrd_ring->count)
                        rrd_next_to_clean = 0;
                count++;

                if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                        if (!(rrd->err_flg &
                                (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
                                | ERR_FLAG_LEN))) {
                                /* packet error, no need to pass it upstream */
                                buffer_info->alloced = 0;
                                rrd->xsz.valid = 0;
                                continue;
                        }
                }

                /* Good Receive */
                pci_unmap_page(adapter->pdev, buffer_info->dma,
                               buffer_info->length, PCI_DMA_FROMDEVICE);
                skb = buffer_info->skb;
                length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

                skb_put(skb, length - ETHERNET_FCS_SIZE);

                /* Receive Checksum Offload */
                atl1_rx_checksum(adapter, rrd, skb);
                skb->protocol = eth_type_trans(skb, adapter->netdev);

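                /*
                 * The RRD carries the 802.1Q TCI with its bit layout
                 * shuffled; reassemble priority, CFI and VID into the
                 * order vlan_hwaccel_rx() expects.
                 */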
                if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
                        u16 vlan_tag = (rrd->vlan_tag >> 4) |
                                        ((rrd->vlan_tag & 7) << 13) |
                                        ((rrd->vlan_tag & 8) << 9);
                        vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
                } else
                        netif_rx(skb);

                /* let protocol layer free skb */
                buffer_info->skb = NULL;
                buffer_info->alloced = 0;
                rrd->xsz.valid = 0;

                adapter->netdev->last_rx = jiffies;
        }

        atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

        atl1_alloc_rx_buffers(adapter);

        /* update mailbox? */
        if (count) {
                u32 tpd_next_to_use;
                u32 rfd_next_to_use;
                u32 rrd_next_to_clean;

                spin_lock(&adapter->mb_lock);

                tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
                rfd_next_to_use =
                    atomic_read(&adapter->rfd_ring.next_to_use);
                rrd_next_to_clean =
                    atomic_read(&adapter->rrd_ring.next_to_clean);
                value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
                        MB_RFD_PROD_INDX_SHIFT) |
                        ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
                        MB_RRD_CONS_INDX_SHIFT) |
                        ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
                        MB_TPD_PROD_INDX_SHIFT);
                iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
                spin_unlock(&adapter->mb_lock);
        }
}

static void atl1_intr_tx(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_buffer *buffer_info;
        u16 sw_tpd_next_to_clean;
        u16 cmb_tpd_next_to_clean;
        u8 update = 0;

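        /*
         * The hardware publishes the index of the last consumed TPD
         * through the CMB; clean every descriptor between our software
         * index and that point.
         */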
        sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
        cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

        while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
                struct tx_packet_desc *tpd;
                update = 1;
                tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
                buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
                if (buffer_info->dma) {
                        pci_unmap_page(adapter->pdev, buffer_info->dma,
                                       buffer_info->length, PCI_DMA_TODEVICE);
                        buffer_info->dma = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb_irq(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
                tpd->buffer_addr = 0;
                tpd->desc.data = 0;

                if (++sw_tpd_next_to_clean == tpd_ring->count)
                        sw_tpd_next_to_clean = 0;
        }
        atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

        if (netif_queue_stopped(adapter->netdev)
            && netif_carrier_ok(adapter->netdev))
                netif_wake_queue(adapter->netdev);
}

static void atl1_check_for_link(struct atl1_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u16 phy_data = 0;

        spin_lock(&adapter->lock);
        adapter->phy_timer_pending = false;
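        /* BMSR link status is latched low, so read twice to get the
         * current state */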
        atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
        atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
        spin_unlock(&adapter->lock);

        /* notify upper layer link down ASAP */
        if (!(phy_data & BMSR_LSTATUS)) {       /* Link Down */
                if (netif_carrier_ok(netdev)) { /* old link state: Up */
                        printk(KERN_INFO "%s: %s link is down\n",
                               atl1_driver_name, netdev->name);
                        adapter->link_speed = SPEED_0;
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                }
        }
        schedule_work(&adapter->link_chg_task);
}

/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
        struct atl1_adapter *adapter = netdev_priv(data);
        u32 status;
        u8 update_rx;
        int max_ints = 10;

        status = adapter->cmb.cmb->int_stats;
        if (!status)
                return IRQ_NONE;

        update_rx = 0;

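        /*
         * Keep servicing as long as the CMB reports fresh interrupt
         * causes, but bail out after max_ints rounds so a stuck
         * source cannot keep us in the handler forever.
         */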
        do {
                /* clear CMB interrupt status at once */
                adapter->cmb.cmb->int_stats = 0;

                if (status & ISR_GPHY)  /* clear phy status */
                        atl1_clear_phy_int(adapter);

                /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
                iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

                /* check if SMB intr */
                if (status & ISR_SMB)
                        atl1_inc_smb(adapter);

                /* check if PCIE PHY Link down */
                if (status & ISR_PHY_LINKDOWN) {
                        printk(KERN_DEBUG "%s: pcie phy link down %x\n",
                                atl1_driver_name, status);
                        if (netif_running(adapter->netdev)) {   /* reset MAC */
                                iowrite32(0, adapter->hw.hw_addr + REG_IMR);
                                schedule_work(&adapter->pcie_dma_to_rst_task);
                                return IRQ_HANDLED;
                        }
                }

                /* check if DMA read/write error */
                if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
                        printk(KERN_DEBUG
                                "%s: pcie DMA r/w error (status = 0x%x)\n",
                                atl1_driver_name, status);
                        iowrite32(0, adapter->hw.hw_addr + REG_IMR);
                        schedule_work(&adapter->pcie_dma_to_rst_task);
                        return IRQ_HANDLED;
                }

                /* link event */
                if (status & ISR_GPHY) {
                        adapter->soft_stats.tx_carrier_errors++;
                        atl1_check_for_link(adapter);
                }

                /* transmit event */
                if (status & ISR_CMB_TX)
                        atl1_intr_tx(adapter);

                /* rx exception */
                if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
                                ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
                                ISR_HOST_RRD_OV | ISR_CMB_RX))) {
                        if (status &
                            (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV |
                             ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV))
                                printk(KERN_INFO
                                        "%s: rx exception: status = 0x%x\n",
                                        atl1_driver_name, status);
                        atl1_intr_rx(adapter);
                }

                if (--max_ints < 0)
                        break;

        } while ((status = adapter->cmb.cmb->int_stats));

        /* re-enable Interrupt */
        iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
        return IRQ_HANDLED;
}

/*
 * atl1_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atl1_set_multi(struct net_device *netdev)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        struct atl1_hw *hw = &adapter->hw;
        struct dev_mc_list *mc_ptr;
        u32 rctl;
        u32 hash_value;

        /* Check for Promiscuous and All Multicast modes */
        rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
        if (netdev->flags & IFF_PROMISC)
                rctl |= MAC_CTRL_PROMIS_EN;
        else if (netdev->flags & IFF_ALLMULTI) {
                rctl |= MAC_CTRL_MC_ALL_EN;
                rctl &= ~MAC_CTRL_PROMIS_EN;
        } else
                rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);

        iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);

        /* clear the old settings from the multicast hash table */
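        /* the 64-bit hash table spans two consecutive 32-bit registers */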
        iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
        iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));

        /* compute the hash value of each mc address and set it in the table */
        for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
                hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
                atl1_hash_set(hw, hash_value);
        }
}

static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
        u32 value;
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        /* Config MAC CTRL Register */
        value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
        /* duplex */
        if (FULL_DUPLEX == adapter->link_duplex)
                value |= MAC_CTRL_DUPLX;
        /* speed */
        value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
                         MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
                  MAC_CTRL_SPEED_SHIFT);
        /* flow control */
        value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
        /* PAD & CRC */
        value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
        /* preamble length */
        value |= (((u32) adapter->hw.preamble_len
                   & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
        /* vlan */
        if (adapter->vlgrp)
                value |= MAC_CTRL_RMV_VLAN;
        /* rx checksum
           if (adapter->rx_csum)
           value |= MAC_CTRL_RX_CHKSUM_EN;
         */
        /* filter mode */
        value |= MAC_CTRL_BC_EN;
        if (netdev->flags & IFF_PROMISC)
                value |= MAC_CTRL_PROMIS_EN;
        else if (netdev->flags & IFF_ALLMULTI)
                value |= MAC_CTRL_MC_ALL_EN;
        /* value |= MAC_CTRL_LOOPBACK; */
        iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}

static u32 atl1_check_link(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 ret_val;
        u16 speed, duplex, phy_data;
        int reconfig = 0;

        /* MII_BMSR link status is latched low; it must be read twice */
        atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
        atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
        if (!(phy_data & BMSR_LSTATUS)) {       /* link down */
                if (netif_carrier_ok(netdev)) { /* old link state: Up */
                        printk(KERN_INFO "%s: link is down\n",
                                atl1_driver_name);
                        adapter->link_speed = SPEED_0;
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                }
                return ATL1_SUCCESS;
        }

        /* Link Up */
        ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
        if (ret_val)
                return ret_val;

        switch (hw->media_type) {
        case MEDIA_TYPE_1000M_FULL:
                if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_100M_FULL:
                if (speed != SPEED_100 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_100M_HALF:
                if (speed != SPEED_100 || duplex != HALF_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_10M_FULL:
                if (speed != SPEED_10 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_10M_HALF:
                if (speed != SPEED_10 || duplex != HALF_DUPLEX)
                        reconfig = 1;
                break;
        }

        /* the link result matches our configured setting */
        if (!reconfig) {
                if (adapter->link_speed != speed
                    || adapter->link_duplex != duplex) {
                        adapter->link_speed = speed;
                        adapter->link_duplex = duplex;
                        atl1_setup_mac_ctrl(adapter);
                        printk(KERN_INFO "%s: %s link is up %d Mbps %s\n",
                               atl1_driver_name, netdev->name,
                               adapter->link_speed,
                               adapter->link_duplex ==
                               FULL_DUPLEX ? "full duplex" : "half duplex");
                }
                if (!netif_carrier_ok(netdev)) {        /* Link down -> Up */
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                }
                return ATL1_SUCCESS;
        }

        /* change original link status */
        if (netif_carrier_ok(netdev)) {
                adapter->link_speed = SPEED_0;
                netif_carrier_off(netdev);
                netif_stop_queue(netdev);
        }

        if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
            hw->media_type != MEDIA_TYPE_1000M_FULL) {
                switch (hw->media_type) {
                case MEDIA_TYPE_100M_FULL:
                        phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
                                   MII_CR_RESET;
                        break;
                case MEDIA_TYPE_100M_HALF:
                        phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
                        break;
                case MEDIA_TYPE_10M_FULL:
                        phy_data =
                            MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
                        break;
                default:        /* MEDIA_TYPE_10M_HALF: */
                        phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
                        break;
                }
                atl1_write_phy_reg(hw, MII_BMCR, phy_data);
                return ATL1_SUCCESS;
        }

        /* auto-neg, insert timer to re-config phy */
        if (!adapter->phy_timer_pending) {
                adapter->phy_timer_pending = true;
                mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
        }

        return ATL1_SUCCESS;
}

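/*
 * The hi/lo values below are the watermarks at which the MAC asserts
 * and releases 802.3x pause, expressed in descriptors (old revisions)
 * or SRAM space (new revisions); the exact semantics are inferred
 * from the register names, not from a public datasheet.
 */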
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
        u32 hi, lo, value;

        /* RFD Flow Control */
        value = adapter->rfd_ring.count;
        hi = value / 16;
        if (hi < 2)
                hi = 2;
        lo = value * 7 / 8;

        value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
            ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

        /* RRD Flow Control */
        value = adapter->rrd_ring.count;
        lo = value / 16;
        hi = value * 7 / 8;
        if (lo < 2)
                lo = 2;
        value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
            ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

static void set_flow_ctrl_new(struct atl1_hw *hw)
{
        u32 hi, lo, value;

        /* RXF Flow Control */
        value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
        lo = value / 16;
        if (lo < 192)
                lo = 192;
        hi = value * 7 / 8;
        if (hi < lo)
                hi = lo + 16;
        value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
            ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

        /* RRD Flow Control */
        value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
        lo = value / 8;
        hi = value * 7 / 8;
        if (lo < 2)
                lo = 2;
        if (hi < lo)
                hi = lo + 3;
        value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
            ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

/*
 * atl1_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx/Rx unit of the MAC after a reset.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        u32 value;

        /* clear interrupt status */
        iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

        /* set MAC Address */
        value = (((u32) hw->mac_addr[2]) << 24) |
                (((u32) hw->mac_addr[3]) << 16) |
                (((u32) hw->mac_addr[4]) << 8) |
                (((u32) hw->mac_addr[5]));
        iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
        value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
        iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

        /* tx / rx ring */

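        /*
         * All rings come from the single pci_alloc_consistent() block
         * set up in atl1_setup_ring_resources(), so they share the
         * same upper 32 address bits and one HI register suffices.
         */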
        /* HI base address */
        iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
                hw->hw_addr + REG_DESC_BASE_ADDR_HI);
        /* LO base address */
        iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_RFD_ADDR_LO);
        iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_RRD_ADDR_LO);
        iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_TPD_ADDR_LO);
        iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_CMB_ADDR_LO);
        iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_SMB_ADDR_LO);

        /* element count */
        value = adapter->rrd_ring.count;
        value <<= 16;
        value += adapter->rfd_ring.count;
        iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
        iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE);

        /* Load Ptr */
        iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

        /* config Mailbox */
        value = ((atomic_read(&adapter->tpd_ring.next_to_use)
                  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
            ((atomic_read(&adapter->rrd_ring.next_to_clean)
              & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
            ((atomic_read(&adapter->rfd_ring.next_to_use)
              & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAILBOX);

        /* config IPG/IFG */
        value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
                 << MAC_IPG_IFG_IPGT_SHIFT) |
            (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
             << MAC_IPG_IFG_MIFG_SHIFT) |
            (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
             << MAC_IPG_IFG_IPGR1_SHIFT) |
            (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
             << MAC_IPG_IFG_IPGR2_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

        /* config Half-Duplex Control */
        value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
            (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
             << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
            MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
            (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
            (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
             << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

        /* set Interrupt Moderator Timer */
        iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
        iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

        /* set Interrupt Clear Timer */
        iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

        /* set MTU; the extra 4 bytes allow for a VLAN tag */
        iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU);

        /* jumbo size & rrd retirement timer */
        value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
                 << RXQ_JMBOSZ_TH_SHIFT) |
            (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
             << RXQ_JMBO_LKAH_SHIFT) |
            (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
             << RXQ_RRD_TIMER_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

        /* Flow Control */
        switch (hw->dev_rev) {
        case 0x8001:
        case 0x9001:
        case 0x9002:
        case 0x9003:
                set_flow_ctrl_old(adapter);
                break;
        default:
                set_flow_ctrl_new(hw);
                break;
        }

        /* config TXQ */
        value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
                 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
            (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
             << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
            (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
             << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN;
        iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

        /* min tpd fetch gap & tx jumbo packet size threshold for task offload */
        value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
                 << TX_JUMBO_TASK_TH_SHIFT) |
            (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
             << TX_TPD_MIN_IPG_SHIFT);
        iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

        /* config RXQ */
        value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
                 << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
            (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
             << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
            (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
             << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) |
            RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
        iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

        /* config DMA Engine; the write block uses the DMAW mask/shift */
        value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
                 << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
            ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
             << DMA_CTRL_DMAW_BURST_LEN_SHIFT) |
            DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN;
        value |= (u32) hw->dma_ord;
        if (atl1_rcb_128 == hw->rcb_value)
                value |= DMA_CTRL_RCB_VALUE;
        iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

        /* config CMB / SMB */
        value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16);
        iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
        value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
        iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
        iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

        /* --- enable CMB / SMB */
        value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
        iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

        value = ioread32(adapter->hw.hw_addr + REG_ISR);
        if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
                value = 1;      /* config failed */
        else
                value = 0;

        /* clear all interrupt status */
        iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
        iowrite32(0, adapter->hw.hw_addr + REG_ISR);
        return value;
}

/*
 * atl1_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static void atl1_irq_disable(struct atl1_adapter *adapter)
{
        atomic_inc(&adapter->irq_sem);
        iowrite32(0, adapter->hw.hw_addr + REG_IMR);
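        /* read back to flush the posted write before synchronizing */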
        ioread32(adapter->hw.hw_addr + REG_IMR);
        synchronize_irq(adapter->pdev->irq);
}

static void atl1_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
        u32 ctrl;

        spin_lock_irqsave(&adapter->lock, flags);
        /* atl1_irq_disable(adapter); */
        adapter->vlgrp = grp;

        if (grp) {
                /* enable VLAN tag insert/strip */
                ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
                ctrl |= MAC_CTRL_RMV_VLAN;
                iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
                ctrl &= ~MAC_CTRL_RMV_VLAN;
                iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
        }

        /* atl1_irq_enable(adapter); */
        spin_unlock_irqrestore(&adapter->lock, flags);
}

/* FIXME: justify or remove -- CHS */
static void atl1_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        /* We don't do VLAN filtering */
        return;
}

/* FIXME: this looks wrong too -- CHS */
static void atl1_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;

        spin_lock_irqsave(&adapter->lock, flags);
        /* atl1_irq_disable(adapter); */
        if (adapter->vlgrp)
                adapter->vlgrp->vlan_devices[vid] = NULL;
        /* atl1_irq_enable(adapter); */
        spin_unlock_irqrestore(&adapter->lock, flags);
        /* We don't do VLAN filtering */
        return;
}

static void atl1_restore_vlan(struct atl1_adapter *adapter)
{
        atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
        if (adapter->vlgrp) {
                u16 vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
                        if (!adapter->vlgrp->vlan_devices[vid])
                                continue;
                        atl1_vlan_rx_add_vid(adapter->netdev, vid);
                }
        }
}

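/*
 * Free slots in the TPD ring; one slot is always kept empty so a full
 * ring can be told apart from an empty one.  For example, with
 * count = 256, next_to_clean = 10 and next_to_use = 14, 251 slots
 * are still available.
 */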
static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
        u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
        u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
        return ((next_to_clean >
                 next_to_use) ? next_to_clean - next_to_use -
                1 : tpd_ring->count + next_to_clean - next_to_use - 1);
}

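/*
 * Prime the headers for hardware segmentation: zero the IP length and
 * checksum fields and seed the TCP checksum with the pseudo-header
 * sum (length omitted), so the MAC can fill in per-segment lengths
 * and checksums itself.
 */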
1285 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
1286                          struct tso_param *tso)
1287 {
1288         /* We enter this function holding a spinlock. */
1289         u8 ipofst;
1290         int err;
1291
1292         if (skb_shinfo(skb)->gso_size) {
1293                 if (skb_header_cloned(skb)) {
1294                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1295                         if (unlikely(err))
1296                                 return err;
1297                 }
1298
1299                 if (skb->protocol == ntohs(ETH_P_IP)) {
1300                         skb->nh.iph->tot_len = 0;
1301                         skb->nh.iph->check = 0;
1302                         skb->h.th->check =
1303                             ~csum_tcpudp_magic(skb->nh.iph->saddr,
1304                                                skb->nh.iph->daddr, 0,
1305                                                IPPROTO_TCP, 0);
1306                         ipofst = skb->nh.raw - skb->data;
1307                         if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
1308                                 tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
1309
1310                         tso->tsopl |= (skb->nh.iph->ihl &
1311                                 CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
1312                         tso->tsopl |= ((skb->h.th->doff << 2) &
1313                                 TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
1314                         tso->tsopl |= (skb_shinfo(skb)->gso_size &
1315                                 TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
1316                         tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
1317                         tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
1318                         tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
1319                         return true;
1320                 }
1321         }
1322         return false;
1323 }
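/*
 * Illustrative tsopl packing for a common case, assuming an Ethernet
 * II frame with a 20-byte IPv4 header (ihl = 5), a 20-byte TCP header
 * (doff = 5) and gso_size = 1460:
 *
 *   IPHL field      = 5
 *   TCPHDRLEN field = doff << 2 = 20 (bytes)
 *   MSS field       = 1460
 *   IPCKSUM, TCPCKSUM and SEGMENT flags = 1
 *
 * ETHTYPE stays clear because skb->nh.raw - skb->data equals
 * ENET_HEADER_SIZE (14) here; only an 802.3 frame sets it.
 */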
1324
1325 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
1326                         struct csum_param *csum)
1327 {
1328         u8 css, cso;
1329
1330         if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1331                 cso = skb->h.raw - skb->data;
1332                 css = (skb->h.raw + skb->csum) - skb->data;
1333                 if (unlikely(cso & 0x1)) {
1334                         printk(KERN_DEBUG "%s: payload offset is an odd number\n",
1335                                 atl1_driver_name);
1336                         return -1;
1337                 }
1338                 csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
1339                         CSUM_PARAM_PLOADOFFSET_SHIFT;
1340                 csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
1341                         CSUM_PARAM_XSUMOFFSET_SHIFT;
1342                 csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
1343                 return true;
1344         }
1345
1346         return true;
1347 }
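/*
 * Illustrative offsets for the custom checksum case, assuming an
 * untagged IPv4/TCP frame and the 2.6.x convention (used above) that
 * skb->csum holds the checksum field offset relative to skb->h.raw:
 *
 *   cso = skb->h.raw - skb->data = 14 (Ethernet) + 20 (IP) = 34
 *   css = cso + 16               = 50 (TCP checksum field)
 *
 * cso is even, as the hardware requires, and both values fit their
 * PLOADOFFSET/XSUMOFFSET fields.
 */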
1348
1349 static void atl1_tx_map(struct atl1_adapter *adapter,
1350                                 struct sk_buff *skb, bool tcp_seg)
1351 {
1352         /* We enter this function holding a spinlock. */
1353         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1354         struct atl1_buffer *buffer_info;
1355         struct page *page;
1356         int first_buf_len = skb->len;
1357         unsigned long offset;
1358         unsigned int nr_frags;
1359         unsigned int f;
1360         u16 tpd_next_to_use;
1361         u16 proto_hdr_len;
1362         u16 i, m, len12;
1363
1364         first_buf_len -= skb->data_len;
1365         nr_frags = skb_shinfo(skb)->nr_frags;
1366         tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
1367         buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1368         if (unlikely(buffer_info->skb))
1369                 BUG();
1370         buffer_info->skb = NULL;        /* put skb in last TPD */
1371
1372         if (tcp_seg) {
1373                 /* TSO/GSO */
1374                 proto_hdr_len =
1375                     ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1376                 buffer_info->length = proto_hdr_len;
1377                 page = virt_to_page(skb->data);
1378                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1379                 buffer_info->dma = pci_map_page(adapter->pdev, page,
1380                                                 offset, proto_hdr_len,
1381                                                 PCI_DMA_TODEVICE);
1382
1383                 if (++tpd_next_to_use == tpd_ring->count)
1384                         tpd_next_to_use = 0;
1385
1386                 if (first_buf_len > proto_hdr_len) {
1387                         len12 = first_buf_len - proto_hdr_len;
1388                         m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1389                         for (i = 0; i < m; i++) {
1390                                 buffer_info =
1391                                     &tpd_ring->buffer_info[tpd_next_to_use];
1392                                 buffer_info->skb = NULL;
1393                                 buffer_info->length =
1394                                     (MAX_TX_BUF_LEN >=
1395                                      len12) ? len12 : MAX_TX_BUF_LEN;
1396                                 len12 -= buffer_info->length;
1397                                 page = virt_to_page(skb->data +
1398                                                  (proto_hdr_len +
1399                                                   i * MAX_TX_BUF_LEN));
1400                                 offset = (unsigned long)(skb->data +
1401                                                         (proto_hdr_len +
1402                                                         i * MAX_TX_BUF_LEN)) &
1403                                                         ~PAGE_MASK;
1404                                 buffer_info->dma =
1405                                     pci_map_page(adapter->pdev, page, offset,
1406                                                  buffer_info->length,
1407                                                  PCI_DMA_TODEVICE);
1408                                 if (++tpd_next_to_use == tpd_ring->count)
1409                                         tpd_next_to_use = 0;
1410                         }
1411                 }
1412         } else {
1413                 /* not TSO/GSO */
1414                 buffer_info->length = first_buf_len;
1415                 page = virt_to_page(skb->data);
1416                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1417                 buffer_info->dma = pci_map_page(adapter->pdev, page,
1418                                                 offset, first_buf_len,
1419                                                 PCI_DMA_TODEVICE);
1420                 if (++tpd_next_to_use == tpd_ring->count)
1421                         tpd_next_to_use = 0;
1422         }
1423
1424         for (f = 0; f < nr_frags; f++) {
1425                 struct skb_frag_struct *frag;
1426                 u16 lenf, i, m;
1427
1428                 frag = &skb_shinfo(skb)->frags[f];
1429                 lenf = frag->size;
1430
1431                 m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1432                 for (i = 0; i < m; i++) {
1433                         buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1434                         if (unlikely(buffer_info->skb))
1435                                 BUG();
1436                         buffer_info->skb = NULL;
1437                         buffer_info->length =
1438                             (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf;
1439                         lenf -= buffer_info->length;
1440                         buffer_info->dma =
1441                             pci_map_page(adapter->pdev, frag->page,
1442                                          frag->page_offset + i * MAX_TX_BUF_LEN,
1443                                          buffer_info->length, PCI_DMA_TODEVICE);
1444
1445                         if (++tpd_next_to_use == tpd_ring->count)
1446                                 tpd_next_to_use = 0;
1447                 }
1448         }
1449
1450         /* last tpd's buffer-info */
1451         buffer_info->skb = skb;
1452 }
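/*
 * Illustrative TSO mapping: for a linear 3102-byte skb carrying 54
 * bytes of headers, and assuming for the arithmetic that
 * MAX_TX_BUF_LEN were 2048, the loop above would produce three TPDs:
 *
 *   TPD 0: 54 bytes   (Ethernet + IP + TCP headers)
 *   TPD 1: 2048 bytes (first payload chunk)
 *   TPD 2: 1000 bytes (remaining payload)
 *
 * The skb pointer is stored only in the final TPD's buffer_info so
 * TX cleanup frees the skb exactly once.
 */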
1453
1454 static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
1455                                union tpd_descr *descr)
1456 {
1457         /* We enter this function holding a spinlock. */
1458         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1459         int j;
1460         u32 val;
1461         struct atl1_buffer *buffer_info;
1462         struct tx_packet_desc *tpd;
1463         u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
1464
1465         for (j = 0; j < count; j++) {
1466                 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1467                 tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
1468                 tpd->desc.csum.csumpu = descr->csum.csumpu;
1469                 tpd->desc.csum.csumpl = descr->csum.csumpl;
1470                 tpd->desc.tso.tsopu = descr->tso.tsopu;
1471                 tpd->desc.tso.tsopl = descr->tso.tsopl;
1472                 tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1473                 tpd->desc.data = descr->data;
1474                 tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) &
1475                         CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT;
1476
1477                 val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
1478                         TSO_PARAM_SEGMENT_MASK;
1479                 if (val && !j)
1480                         tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;
1481
1482                 if (j == (count - 1))
1483                         tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT;
1484
1485                 if (++tpd_next_to_use == tpd_ring->count)
1486                         tpd_next_to_use = 0;
1487         }
1488         /*
1489          * Force memory writes to complete before letting h/w
1490          * know there are new descriptors to fetch.  (Only
1491          * applicable for weak-ordered memory model archs,
1492          * such as IA-64).
1493          */
1494         wmb();
1495
1496         atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
1497 }
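/*
 * Flag placement, as implemented above: in a 3-TPD TSO packet only
 * TPD 0 carries HDRFLAG (it holds the headers the hardware replays
 * for every segment) and only TPD 2 carries EOP, marking the end of
 * the packet.
 */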
1498
1499 static void atl1_update_mailbox(struct atl1_adapter *adapter)
1500 {
1501         unsigned long flags;
1502         u32 tpd_next_to_use;
1503         u32 rfd_next_to_use;
1504         u32 rrd_next_to_clean;
1505         u32 value;
1506
1507         spin_lock_irqsave(&adapter->mb_lock, flags);
1508
1509         tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
1510         rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
1511         rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
1512
1513         value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
1514                 MB_RFD_PROD_INDX_SHIFT) |
1515                 ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
1516                 MB_RRD_CONS_INDX_SHIFT) |
1517                 ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
1518                 MB_TPD_PROD_INDX_SHIFT);
1519         iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
1520
1521         spin_unlock_irqrestore(&adapter->mb_lock, flags);
1522 }
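/*
 * Illustrative mailbox packing: with tpd_next_to_use = 5,
 * rfd_next_to_use = 128 and rrd_next_to_clean = 64, each index is
 * masked to its field width and shifted to its field position, so the
 * single 32-bit write above publishes all three ring positions to the
 * hardware at once.
 */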
1523
1524 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1525 {
1526         struct atl1_adapter *adapter = netdev_priv(netdev);
1527         int len = skb->len;
1528         int tso;
1529         int count = 1;
1530         int ret_val;
1531         u32 val;
1532         union tpd_descr param;
1533         u16 frag_size;
1534         u16 vlan_tag;
1535         unsigned long flags;
1536         unsigned int nr_frags = 0;
1537         unsigned int mss = 0;
1538         unsigned int f;
1539         unsigned int proto_hdr_len;
1540
1541         len -= skb->data_len;
1542
1543         if (unlikely(skb->len == 0)) {
1544                 dev_kfree_skb_any(skb);
1545                 return NETDEV_TX_OK;
1546         }
1547
1548         param.data = 0;
1549         param.tso.tsopu = 0;
1550         param.tso.tsopl = 0;
1551         param.csum.csumpu = 0;
1552         param.csum.csumpl = 0;
1553
1554         /* nr_frags will be nonzero if we're doing scatter/gather (SG) */
1555         nr_frags = skb_shinfo(skb)->nr_frags;
1556         for (f = 0; f < nr_frags; f++) {
1557                 frag_size = skb_shinfo(skb)->frags[f].size;
1558                 if (frag_size)
1559                         count +=
1560                             (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1561         }
1562
1563         /* mss will be nonzero if we're doing segment offload (TSO/GSO) */
1564         mss = skb_shinfo(skb)->gso_size;
1565         if (mss) {
1566                 if (skb->protocol == htons(ETH_P_IP)) {
1567                         proto_hdr_len = ((skb->h.raw - skb->data) +
1568                                          (skb->h.th->doff << 2));
1569                         if (unlikely(proto_hdr_len > len)) {
1570                                 dev_kfree_skb_any(skb);
1571                                 return NETDEV_TX_OK;
1572                         }
1573                         /* need additional TPD ? */
1574                         if (proto_hdr_len != len)
1575                                 count += (len - proto_hdr_len +
1576                                         MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1577                 }
1578         }
1579
1580         local_irq_save(flags);
1581         if (!spin_trylock(&adapter->lock)) {
1582                 /* Can't get lock - tell upper layer to requeue */
1583                 local_irq_restore(flags);
1584                 printk(KERN_DEBUG "%s: TX locked\n", atl1_driver_name);
1585                 return NETDEV_TX_LOCKED;
1586         }
1587
1588         if (tpd_avail(&adapter->tpd_ring) < count) {
1589                 /* not enough descriptors */
1590                 netif_stop_queue(netdev);
1591                 spin_unlock_irqrestore(&adapter->lock, flags);
1592                 printk(KERN_DEBUG "%s: TX busy\n", atl1_driver_name);
1593                 return NETDEV_TX_BUSY;
1594         }
1595
1596         param.data = 0;
1597
1598         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1599                 vlan_tag = vlan_tx_tag_get(skb);
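                /*
                 * Reorder the 802.1Q TCI (PCP in bits 15:13, CFI in
                 * bit 12, VID in bits 11:0) into the layout this
                 * hardware appears to expect: VID in bits 15:4, CFI
                 * in bit 3, PCP in bits 2:0.
                 */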
1600                 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
1601                         ((vlan_tag >> 9) & 0x8);
1602                 param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT;
1603                 param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) <<
1604                         CSUM_PARAM_VALAN_SHIFT;
1605         }
1606
1607         tso = atl1_tso(adapter, skb, &param.tso);
1608         if (tso < 0) {
1609                 spin_unlock_irqrestore(&adapter->lock, flags);
1610                 dev_kfree_skb_any(skb);
1611                 return NETDEV_TX_OK;
1612         }
1613
1614         if (!tso) {
1615                 ret_val = atl1_tx_csum(adapter, skb, &param.csum);
1616                 if (ret_val < 0) {
1617                         spin_unlock_irqrestore(&adapter->lock, flags);
1618                         dev_kfree_skb_any(skb);
1619                         return NETDEV_TX_OK;
1620                 }
1621         }
1622
1623         val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) &
1624                 CSUM_PARAM_SEGMENT_MASK;
1625         atl1_tx_map(adapter, skb, 1 == val);
1626         atl1_tx_queue(adapter, count, &param);
1627         netdev->trans_start = jiffies;
1628         spin_unlock_irqrestore(&adapter->lock, flags);
1629         atl1_update_mailbox(adapter);
1630         return NETDEV_TX_OK;
1631 }
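/*
 * Illustrative TPD budgeting for the count logic above: a
 * scatter/gather skb with a linear part and two page fragments of
 * 1500 bytes each (assuming MAX_TX_BUF_LEN exceeds 1500) starts at
 * count = 1 and adds one TPD per fragment, so tpd_avail() must report
 * at least 3 free descriptors before the frame is accepted.
 */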
1632
1633 /*
1634  * atl1_get_stats - Get System Network Statistics
1635  * @netdev: network interface device structure
1636  *
1637  * Returns the address of the device statistics structure.
1638  * The statistics are actually updated from the timer callback.
1639  */
1640 static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
1641 {
1642         struct atl1_adapter *adapter = netdev_priv(netdev);
1643         return &adapter->net_stats;
1644 }
1645
1646 /*
1647  * atl1_clean_rx_ring - Free RFD Buffers
1648  * @adapter: board private structure
1649  */
1650 static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
1651 {
1652         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1653         struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
1654         struct atl1_buffer *buffer_info;
1655         struct pci_dev *pdev = adapter->pdev;
1656         unsigned long size;
1657         unsigned int i;
1658
1659         /* Free all the Rx ring sk_buffs */
1660         for (i = 0; i < rfd_ring->count; i++) {
1661                 buffer_info = &rfd_ring->buffer_info[i];
1662                 if (buffer_info->dma) {
1663                         pci_unmap_page(pdev,
1664                                         buffer_info->dma,
1665                                         buffer_info->length,
1666                                         PCI_DMA_FROMDEVICE);
1667                         buffer_info->dma = 0;
1668                 }
1669                 if (buffer_info->skb) {
1670                         dev_kfree_skb(buffer_info->skb);
1671                         buffer_info->skb = NULL;
1672                 }
1673         }
1674
1675         size = sizeof(struct atl1_buffer) * rfd_ring->count;
1676         memset(rfd_ring->buffer_info, 0, size);
1677
1678         /* Zero out the descriptor ring */
1679         memset(rfd_ring->desc, 0, rfd_ring->size);
1680
1681         rfd_ring->next_to_clean = 0;
1682         atomic_set(&rfd_ring->next_to_use, 0);
1683
1684         rrd_ring->next_to_use = 0;
1685         atomic_set(&rrd_ring->next_to_clean, 0);
1686 }
1687
1688 /*
1689  * atl1_clean_tx_ring - Free Tx Buffers
1690  * @adapter: board private structure
1691  */
1692 static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
1693 {
1694         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1695         struct atl1_buffer *buffer_info;
1696         struct pci_dev *pdev = adapter->pdev;
1697         unsigned long size;
1698         unsigned int i;
1699
1700         /* Free all the Tx ring sk_buffs */
1701         for (i = 0; i < tpd_ring->count; i++) {
1702                 buffer_info = &tpd_ring->buffer_info[i];
1703                 if (buffer_info->dma) {
1704                         pci_unmap_page(pdev, buffer_info->dma,
1705                                        buffer_info->length, PCI_DMA_TODEVICE);
1706                         buffer_info->dma = 0;
1707                 }
1708         }
1709
1710         for (i = 0; i < tpd_ring->count; i++) {
1711                 buffer_info = &tpd_ring->buffer_info[i];
1712                 if (buffer_info->skb) {
1713                         dev_kfree_skb_any(buffer_info->skb);
1714                         buffer_info->skb = NULL;
1715                 }
1716         }
1717
1718         size = sizeof(struct atl1_buffer) * tpd_ring->count;
1719         memset(tpd_ring->buffer_info, 0, size);
1720
1721         /* Zero out the descriptor ring */
1722         memset(tpd_ring->desc, 0, tpd_ring->size);
1723
1724         atomic_set(&tpd_ring->next_to_use, 0);
1725         atomic_set(&tpd_ring->next_to_clean, 0);
1726 }
1727
1728 /*
1729  * atl1_free_ring_resources - Free Tx / RX descriptor Resources
1730  * @adapter: board private structure
1731  *
1732  * Free all transmit software resources
1733  */
1734 void atl1_free_ring_resources(struct atl1_adapter *adapter)
1735 {
1736         struct pci_dev *pdev = adapter->pdev;
1737         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1738         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1739         struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
1740         struct atl1_ring_header *ring_header = &adapter->ring_header;
1741
1742         atl1_clean_tx_ring(adapter);
1743         atl1_clean_rx_ring(adapter);
1744
1745         kfree(tpd_ring->buffer_info);
1746         pci_free_consistent(pdev, ring_header->size, ring_header->desc,
1747                             ring_header->dma);
1748
1749         tpd_ring->buffer_info = NULL;
1750         tpd_ring->desc = NULL;
1751         tpd_ring->dma = 0;
1752
1753         rfd_ring->buffer_info = NULL;
1754         rfd_ring->desc = NULL;
1755         rfd_ring->dma = 0;
1756
1757         rrd_ring->desc = NULL;
1758         rrd_ring->dma = 0;
1759 }
1760
1761 s32 atl1_up(struct atl1_adapter *adapter)
1762 {
1763         struct net_device *netdev = adapter->netdev;
1764         int err;
1765         int irq_flags = IRQF_SAMPLE_RANDOM;
1766
1767         /* hardware has been reset, we need to reload some things */
1768         atl1_set_multi(netdev);
1769         atl1_restore_vlan(adapter);
1770         err = atl1_alloc_rx_buffers(adapter);
1771         if (unlikely(!err))             /* no RX BUFFER allocated */
1772                 return -ENOMEM;
1773
1774         if (unlikely(atl1_configure(adapter))) {
1775                 err = -EIO;
1776                 goto err_up;
1777         }
1778
1779         err = pci_enable_msi(adapter->pdev);
1780         if (err) {
1781                 dev_info(&adapter->pdev->dev,
1782                         "Unable to enable MSI: %d\n", err);
1783                 irq_flags |= IRQF_SHARED;
1784         }
1785
1786         err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
1787                         netdev->name, netdev);
1788         if (unlikely(err))
1789                 goto err_up;
1790
1791         mod_timer(&adapter->watchdog_timer, jiffies);
1792         atl1_irq_enable(adapter);
1793         atl1_check_link(adapter);
1794         return 0;
1795
1796         /* FIXME: unreachable code! -- CHS */
1797         /* free irq disable any interrupt */
1798         iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1799         free_irq(adapter->pdev->irq, netdev);
1800
1801 err_up:
1802         pci_disable_msi(adapter->pdev);
1803         /* free rx_buffers */
1804         atl1_clean_rx_ring(adapter);
1805         return err;
1806 }
1807
1808 void atl1_down(struct atl1_adapter *adapter)
1809 {
1810         struct net_device *netdev = adapter->netdev;
1811
1812         del_timer_sync(&adapter->watchdog_timer);
1813         del_timer_sync(&adapter->phy_config_timer);
1814         adapter->phy_timer_pending = false;
1815
1816         atl1_irq_disable(adapter);
1817         free_irq(adapter->pdev->irq, netdev);
1818         pci_disable_msi(adapter->pdev);
1819         atl1_reset_hw(&adapter->hw);
1820         adapter->cmb.cmb->int_stats = 0;
1821
1822         adapter->link_speed = SPEED_0;
1823         adapter->link_duplex = -1;
1824         netif_carrier_off(netdev);
1825         netif_stop_queue(netdev);
1826
1827         atl1_clean_tx_ring(adapter);
1828         atl1_clean_rx_ring(adapter);
1829 }
1830
1831 /*
1832  * atl1_change_mtu - Change the Maximum Transfer Unit
1833  * @netdev: network interface device structure
1834  * @new_mtu: new value for maximum frame size
1835  *
1836  * Returns 0 on success, negative on failure
1837  */
1838 static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
1839 {
1840         struct atl1_adapter *adapter = netdev_priv(netdev);
1841         int old_mtu = netdev->mtu;
1842         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1843
1844         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1845             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1846                 printk(KERN_WARNING "%s: invalid MTU setting\n",
1847                         atl1_driver_name);
1848                 return -EINVAL;
1849         }
1850
1851         adapter->hw.max_frame_size = max_frame;
1852         adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
1853         adapter->rx_buffer_len = (max_frame + 7) & ~7;
1854         adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
1855
1856         netdev->mtu = new_mtu;
1857         if ((old_mtu != new_mtu) && netif_running(netdev)) {
1858                 atl1_down(adapter);
1859                 atl1_up(adapter);
1860         }
1861
1862         return 0;
1863 }
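/*
 * Worked example for the standard 1500-byte MTU:
 *
 *   max_frame        = 1500 + 14 (header) + 4 (FCS) = 1518
 *   tx_jumbo_task_th = (1518 + 7) >> 3  = 190 (8-byte units)
 *   rx_buffer_len    = (1518 + 7) & ~7  = 1520 (8-byte aligned)
 *   rx_jumbo_th      = 1520 / 8         = 190
 */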
1864
1865 /*
1866  * atl1_set_mac - Change the Ethernet Address of the NIC
1867  * @netdev: network interface device structure
1868  * @p: pointer to an address structure
1869  *
1870  * Returns 0 on success, negative on failure
1871  */
1872 static int atl1_set_mac(struct net_device *netdev, void *p)
1873 {
1874         struct atl1_adapter *adapter = netdev_priv(netdev);
1875         struct sockaddr *addr = p;
1876
1877         if (netif_running(netdev))
1878                 return -EBUSY;
1879
1880         if (!is_valid_ether_addr(addr->sa_data))
1881                 return -EADDRNOTAVAIL;
1882
1883         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1884         memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1885
1886         atl1_set_mac_addr(&adapter->hw);
1887         return 0;
1888 }
1889
1890 /*
1891  * atl1_watchdog - Timer Call-back
1892  * @data: pointer to the adapter structure cast into an unsigned long
1893  */
1894 static void atl1_watchdog(unsigned long data)
1895 {
1896         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1897
1898         /* Reset the timer */
1899         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1900 }
1901
1902 static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
1903 {
1904         struct atl1_adapter *adapter = netdev_priv(netdev);
1905         u16 result;
1906
1907         atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
1908
1909         return result;
1910 }
1911
1912 static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val)
1913 {
1914         struct atl1_adapter *adapter = netdev_priv(netdev);
1915
1916         atl1_write_phy_reg(&adapter->hw, reg_num, val);
1917 }
1918
1919 /*
1920  * atl1_mii_ioctl - handle MII register ioctls
1921  * @netdev: network interface device structure
1922  * @ifr: pointer to the ifreq carrying the MII request
1923  * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
1924  */
1925 static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1926 {
1927         struct atl1_adapter *adapter = netdev_priv(netdev);
1928         unsigned long flags;
1929         int retval;
1930
1931         if (!netif_running(netdev))
1932                 return -EINVAL;
1933
1934         spin_lock_irqsave(&adapter->lock, flags);
1935         retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
1936         spin_unlock_irqrestore(&adapter->lock, flags);
1937
1938         return retval;
1939 }
1940
1941 /*
1942  * atl1_ioctl - dispatch ioctls to the MII handler
1943  * @netdev: network interface device structure
1944  * @ifr: pointer to the ifreq for the request
1945  * @cmd: ioctl command
1946  */
1947 static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1948 {
1949         switch (cmd) {
1950         case SIOCGMIIPHY:
1951         case SIOCGMIIREG:
1952         case SIOCSMIIREG:
1953                 return atl1_mii_ioctl(netdev, ifr, cmd);
1954         default:
1955                 return -EOPNOTSUPP;
1956         }
1957 }
1958
1959 /*
1960  * atl1_tx_timeout - Respond to a Tx Hang
1961  * @netdev: network interface device structure
1962  */
1963 static void atl1_tx_timeout(struct net_device *netdev)
1964 {
1965         struct atl1_adapter *adapter = netdev_priv(netdev);
1966         /* Do the reset outside of interrupt context */
1967         schedule_work(&adapter->tx_timeout_task);
1968 }
1969
1970 /*
1971  * atl1_phy_config - Timer Call-back
1972  * @data: pointer to the adapter structure cast into an unsigned long
1973  */
1974 static void atl1_phy_config(unsigned long data)
1975 {
1976         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1977         struct atl1_hw *hw = &adapter->hw;
1978         unsigned long flags;
1979
1980         spin_lock_irqsave(&adapter->lock, flags);
1981         adapter->phy_timer_pending = false;
1982         atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1983         atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
1984         atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
1985         spin_unlock_irqrestore(&adapter->lock, flags);
1986 }
1987
1988 int atl1_reset(struct atl1_adapter *adapter)
1989 {
1990         int ret;
1991
1992         ret = atl1_reset_hw(&adapter->hw);
1993         if (ret != ATL1_SUCCESS)
1994                 return ret;
1995         return atl1_init_hw(&adapter->hw);
1996 }
1997
1998 /*
1999  * atl1_open - Called when a network interface is made active
2000  * @netdev: network interface device structure
2001  *
2002  * Returns 0 on success, negative value on failure
2003  *
2004  * The open entry point is called when a network interface is made
2005  * active by the system (IFF_UP).  At this point all resources needed
2006  * for transmit and receive operations are allocated, the interrupt
2007  * handler is registered with the OS, the watchdog timer is started,
2008  * and the stack is notified that the interface is ready.
2009  */
2010 static int atl1_open(struct net_device *netdev)
2011 {
2012         struct atl1_adapter *adapter = netdev_priv(netdev);
2013         int err;
2014
2015         /* allocate transmit descriptors */
2016         err = atl1_setup_ring_resources(adapter);
2017         if (err)
2018                 return err;
2019
2020         err = atl1_up(adapter);
2021         if (err)
2022                 goto err_up;
2023
2024         return 0;
2025
2026 err_up:
2027         atl1_reset(adapter);
2028         return err;
2029 }
2030
2031 /*
2032  * atl1_close - Disables a network interface
2033  * @netdev: network interface device structure
2034  *
2035  * Returns 0, this is not allowed to fail
2036  *
2037  * The close entry point is called when an interface is de-activated
2038  * by the OS.  The hardware is still under the drivers control, but
2039  * needs to be disabled.  A global MAC reset is issued to stop the
2040  * hardware, and all transmit and receive resources are freed.
2041  */
2042 static int atl1_close(struct net_device *netdev)
2043 {
2044         struct atl1_adapter *adapter = netdev_priv(netdev);
2045         atl1_down(adapter);
2046         atl1_free_ring_resources(adapter);
2047         return 0;
2048 }
2049
2050 /*
2051  * If a TPD buffer's size equals 0, the PCIE DMAR_TO_INT
2052  * interrupt will assert. We do a soft reset (0x1400 = 1)
2053  * according to the spec, BUT it seems that the PCIE or DMA
2054  * state machine is not actually reset, so DMAR_TO_INT
2055  * asserts again and again.
2056  */
2057 static void atl1_tx_timeout_task(struct work_struct *work)
2058 {
2059         struct atl1_adapter *adapter =
2060                 container_of(work, struct atl1_adapter, tx_timeout_task);
2061         struct net_device *netdev = adapter->netdev;
2062
2063         netif_device_detach(netdev);
2064         atl1_down(adapter);
2065         atl1_up(adapter);
2066         netif_device_attach(netdev);
2067 }
2068
2069 /*
2070  * atl1_link_chg_task - deal with link change event Out of interrupt context
2071  */
2072 static void atl1_link_chg_task(struct work_struct *work)
2073 {
2074         struct atl1_adapter *adapter =
2075                container_of(work, struct atl1_adapter, link_chg_task);
2076         unsigned long flags;
2077
2078         spin_lock_irqsave(&adapter->lock, flags);
2079         atl1_check_link(adapter);
2080         spin_unlock_irqrestore(&adapter->lock, flags);
2081 }
2082
2083 /*
2084  * atl1_pcie_patch - Patch for PCIE module
2085  */
2086 static void atl1_pcie_patch(struct atl1_adapter *adapter)
2087 {
2088         u32 value;
2089         value = 0x6500;
2090         iowrite32(value, adapter->hw.hw_addr + 0x12FC);
2091         /* pcie flow control mode change */
2092         value = ioread32(adapter->hw.hw_addr + 0x1008);
2093         value |= 0x8000;
2094         iowrite32(value, adapter->hw.hw_addr + 0x1008);
2095 }
2096
2097 /*
2098  * On ACPI resume, some VIA motherboards come back with the
2099  * Interrupt Disable bit (0x400) set in the PCI Command register.
2100  * This function clears that bit so INTx interrupts work again.
2101  * Brackett, 2006/03/15
2102  */
2103 static void atl1_via_workaround(struct atl1_adapter *adapter)
2104 {
2105         unsigned long value;
2106
2107         value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
2108         if (value & PCI_COMMAND_INTX_DISABLE)
2109                 value &= ~PCI_COMMAND_INTX_DISABLE;
2110         iowrite16(value, adapter->hw.hw_addr + PCI_COMMAND);
2111 }
2112
2113 /*
2114  * atl1_probe - Device Initialization Routine
2115  * @pdev: PCI device information struct
2116  * @ent: entry in atl1_pci_tbl
2117  *
2118  * Returns 0 on success, negative on failure
2119  *
2120  * atl1_probe initializes an adapter identified by a pci_dev structure.
2121  * The OS initialization, configuring of the adapter private structure,
2122  * and a hardware reset occur.
2123  */
2124 static int __devinit atl1_probe(struct pci_dev *pdev,
2125                               const struct pci_device_id *ent)
2126 {
2127         struct net_device *netdev;
2128         struct atl1_adapter *adapter;
2129         static int cards_found = 0;
2130         bool pci_using_64 = true;
2131         int err;
2132
2133         err = pci_enable_device(pdev);
2134         if (err)
2135                 return err;
2136
2137         err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
2138         if (err) {
2139                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2140                 if (err) {
2141                         printk(KERN_DEBUG
2142                                 "%s: no usable DMA configuration, aborting\n",
2143                                 atl1_driver_name);
2144                         goto err_dma;
2145                 }
2146                 pci_using_64 = false;
2147         }
2148         /* Mark all PCI regions associated with PCI device
2149          * pdev as being reserved by owner atl1_driver_name
2150          */
2151         err = pci_request_regions(pdev, atl1_driver_name);
2152         if (err)
2153                 goto err_request_regions;
2154
2155         /* Enables bus-mastering on the device and calls
2156          * pcibios_set_master to do the needed arch specific settings
2157          */
2158         pci_set_master(pdev);
2159
2160         netdev = alloc_etherdev(sizeof(struct atl1_adapter));
2161         if (!netdev) {
2162                 err = -ENOMEM;
2163                 goto err_alloc_etherdev;
2164         }
2165         SET_MODULE_OWNER(netdev);
2166         SET_NETDEV_DEV(netdev, &pdev->dev);
2167
2168         pci_set_drvdata(pdev, netdev);
2169         adapter = netdev_priv(netdev);
2170         adapter->netdev = netdev;
2171         adapter->pdev = pdev;
2172         adapter->hw.back = adapter;
2173
2174         adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
2175         if (!adapter->hw.hw_addr) {
2176                 err = -EIO;
2177                 goto err_pci_iomap;
2178         }
2179         /* get device revision number */
2180         adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + (REG_MASTER_CTRL + 2));
2181
2182         /* set default ring resource counts */
2183         adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
2184         adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
2185
2186         adapter->mii.dev = netdev;
2187         adapter->mii.mdio_read = mdio_read;
2188         adapter->mii.mdio_write = mdio_write;
2189         adapter->mii.phy_id_mask = 0x1f;
2190         adapter->mii.reg_num_mask = 0x1f;
2191
2192         netdev->open = &atl1_open;
2193         netdev->stop = &atl1_close;
2194         netdev->hard_start_xmit = &atl1_xmit_frame;
2195         netdev->get_stats = &atl1_get_stats;
2196         netdev->set_multicast_list = &atl1_set_multi;
2197         netdev->set_mac_address = &atl1_set_mac;
2198         netdev->change_mtu = &atl1_change_mtu;
2199         netdev->do_ioctl = &atl1_ioctl;
2200         netdev->tx_timeout = &atl1_tx_timeout;
2201         netdev->watchdog_timeo = 5 * HZ;
2202         netdev->vlan_rx_register = atl1_vlan_rx_register;
2203         netdev->vlan_rx_add_vid = atl1_vlan_rx_add_vid;
2204         netdev->vlan_rx_kill_vid = atl1_vlan_rx_kill_vid;
2205         netdev->ethtool_ops = &atl1_ethtool_ops;
2206         adapter->bd_number = cards_found;
2207         adapter->pci_using_64 = pci_using_64;
2208
2209         /* setup the private structure */
2210         err = atl1_sw_init(adapter);
2211         if (err)
2212                 goto err_common;
2213
2214         netdev->features = NETIF_F_HW_CSUM;
2215         netdev->features |= NETIF_F_SG;
2216         netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2217
2218         /*
2219          * FIXME - Until tso performance gets fixed, disable the feature.
2220          * Enable it with ethtool -K if desired.
2221          */
2222         /* netdev->features |= NETIF_F_TSO; */
2223
2224         if (pci_using_64)
2225                 netdev->features |= NETIF_F_HIGHDMA;
2226
2227         netdev->features |= NETIF_F_LLTX;
2228
2229         /*
2230          * patch for some L1 of old version,
2231          * the final version of L1 may not need these
2232          * patches
2233          */
2234         /* atl1_pcie_patch(adapter); */
2235
2236         /* really reset GPHY core */
2237         iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
2238
2239         /*
2240          * reset the controller to
2241          * put the device in a known good starting state
2242          */
2243         if (atl1_reset_hw(&adapter->hw)) {
2244                 err = -EIO;
2245                 goto err_common;
2246         }
2247
2248         /* copy the MAC address out of the EEPROM */
2249         atl1_read_mac_addr(&adapter->hw);
2250         memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2251
2252         if (!is_valid_ether_addr(netdev->dev_addr)) {
2253                 err = -EIO;
2254                 goto err_common;
2255         }
2256
2257         atl1_check_options(adapter);
2258
2259         /* pre-init the MAC, and setup link */
2260         err = atl1_init_hw(&adapter->hw);
2261         if (err) {
2262                 err = -EIO;
2263                 goto err_common;
2264         }
2265
2266         atl1_pcie_patch(adapter);
2267         /* assume we have no link for now */
2268         netif_carrier_off(netdev);
2269         netif_stop_queue(netdev);
2270
2271         init_timer(&adapter->watchdog_timer);
2272         adapter->watchdog_timer.function = &atl1_watchdog;
2273         adapter->watchdog_timer.data = (unsigned long)adapter;
2274
2275         init_timer(&adapter->phy_config_timer);
2276         adapter->phy_config_timer.function = &atl1_phy_config;
2277         adapter->phy_config_timer.data = (unsigned long)adapter;
2278         adapter->phy_timer_pending = false;
2279
2280         INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
2281
2282         INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task);
2283
2284         INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
2285
2286         err = register_netdev(netdev);
2287         if (err)
2288                 goto err_common;
2289
2290         cards_found++;
2291         atl1_via_workaround(adapter);
2292         return 0;
2293
2294 err_common:
2295         pci_iounmap(pdev, adapter->hw.hw_addr);
2296 err_pci_iomap:
2297         free_netdev(netdev);
2298 err_alloc_etherdev:
2299         pci_release_regions(pdev);
2300 err_dma:
2301 err_request_regions:
2302         pci_disable_device(pdev);
2303         return err;
2304 }
2305
2306 /*
2307  * atl1_remove - Device Removal Routine
2308  * @pdev: PCI device information struct
2309  *
2310  * atl1_remove is called by the PCI subsystem to alert the driver
2311  * that it should release a PCI device.  This could be caused by a
2312  * Hot-Plug event, or because the driver is going to be removed from
2313  * memory.
2314  */
2315 static void __devexit atl1_remove(struct pci_dev *pdev)
2316 {
2317         struct net_device *netdev = pci_get_drvdata(pdev);
2318         struct atl1_adapter *adapter;
2319         /* Device not available. Return. */
2320         if (!netdev)
2321                 return;
2322
2323         adapter = netdev_priv(netdev);
2324         iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
2325         unregister_netdev(netdev);
2326         pci_iounmap(pdev, adapter->hw.hw_addr);
2327         pci_release_regions(pdev);
2328         free_netdev(netdev);
2329         pci_disable_device(pdev);
2330 }
2331
2332 #ifdef CONFIG_PM
2333 static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2334 {
2335         struct net_device *netdev = pci_get_drvdata(pdev);
2336         struct atl1_adapter *adapter = netdev_priv(netdev);
2337         struct atl1_hw *hw = &adapter->hw;
2338         u32 ctrl = 0;
2339         u32 wufc = adapter->wol;
2340
2341         netif_device_detach(netdev);
2342         if (netif_running(netdev))
2343                 atl1_down(adapter);
2344
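        /*
         * MII_BMSR latches link-down events, so read it twice: the
         * second read reflects the current link state rather than a
         * stale latched failure.
         */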
2345         atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2346         atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2347         if (ctrl & BMSR_LSTATUS)
2348                 wufc &= ~ATL1_WUFC_LNKC;
2349
2350         /* reduce speed to 10/100M */
2351         if (wufc) {
2352                 atl1_phy_enter_power_saving(hw);
2353                 /* let the driver re-set up the link upon resume */
2354                 hw->phy_configured = false;
2355                 atl1_set_mac_addr(hw);
2356                 atl1_set_multi(netdev);
2357
2358                 ctrl = 0;
2359                 /* turn on magic packet wol */
2360                 if (wufc & ATL1_WUFC_MAG)
2361                         ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2362
2363                 /* turn on Link change WOL */
2364                 if (wufc & ATL1_WUFC_LNKC)
2365                         ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
2366                 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
2367
2368                 /* turn on all-multi mode if wake on multicast is enabled */
2369                 ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
2370                 ctrl &= ~MAC_CTRL_DBG;
2371                 ctrl &= ~MAC_CTRL_PROMIS_EN;
2372                 if (wufc & ATL1_WUFC_MC)
2373                         ctrl |= MAC_CTRL_MC_ALL_EN;
2374                 else
2375                         ctrl &= ~MAC_CTRL_MC_ALL_EN;
2376
2377                 /* turn on broadcast mode if wake on-BC is enabled */
2378                 if (wufc & ATL1_WUFC_BC)
2379                         ctrl |= MAC_CTRL_BC_EN;
2380                 else
2381                         ctrl &= ~MAC_CTRL_BC_EN;
2382
2383                 /* enable RX */
2384                 ctrl |= MAC_CTRL_RX_EN;
2385                 iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
2386                 pci_enable_wake(pdev, PCI_D3hot, 1);
2387                 pci_enable_wake(pdev, PCI_D3cold, 1);   /* 4 == D3 cold */
2388         } else {
2389                 iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
2390                 pci_enable_wake(pdev, PCI_D3hot, 0);
2391                 pci_enable_wake(pdev, PCI_D3cold, 0);   /* 4 == D3 cold */
2392         }
2393
2394         pci_save_state(pdev);
2395         pci_disable_device(pdev);
2396
2397         pci_set_power_state(pdev, PCI_D3hot);
2398
2399         return 0;
2400 }
2401
2402 static int atl1_resume(struct pci_dev *pdev)
2403 {
2404         struct net_device *netdev = pci_get_drvdata(pdev);
2405         struct atl1_adapter *adapter = netdev_priv(netdev);
2406         u32 ret_val;
2407
2408         pci_set_power_state(pdev, PCI_D0);
2409         pci_restore_state(pdev);
2410
2411         ret_val = pci_enable_device(pdev);
2412         pci_enable_wake(pdev, PCI_D3hot, 0);
2413         pci_enable_wake(pdev, PCI_D3cold, 0);
2414
2415         iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
2416         atl1_reset(adapter);
2417
2418         if (netif_running(netdev))
2419                 atl1_up(adapter);
2420         netif_device_attach(netdev);
2421
2422         atl1_via_workaround(adapter);
2423
2424         return 0;
2425 }
2426 #else
2427 #define atl1_suspend NULL
2428 #define atl1_resume NULL
2429 #endif
2430
2431 static struct pci_driver atl1_driver = {
2432         .name = atl1_driver_name,
2433         .id_table = atl1_pci_tbl,
2434         .probe = atl1_probe,
2435         .remove = __devexit_p(atl1_remove),
2436         /* Power Management Hooks */
2437         /* probably broken right now -- CHS */
2438         .suspend = atl1_suspend,
2439         .resume = atl1_resume
2440 };
2441
2442 /*
2443  * atl1_exit_module - Driver Exit Cleanup Routine
2444  *
2445  * atl1_exit_module is called just before the driver is removed
2446  * from memory.
2447  */
2448 static void __exit atl1_exit_module(void)
2449 {
2450         pci_unregister_driver(&atl1_driver);
2451 }
2452
2453 /*
2454  * atl1_init_module - Driver Registration Routine
2455  *
2456  * atl1_init_module is the first routine called when the driver is
2457  * loaded. All it does is register with the PCI subsystem.
2458  */
2459 static int __init atl1_init_module(void)
2460 {
2461         printk(KERN_INFO "%s - version %s\n", atl1_driver_string, DRIVER_VERSION);
2462         printk(KERN_INFO "%s\n", atl1_copyright);
2463         return pci_register_driver(&atl1_driver);
2464 }
2465
2466 module_init(atl1_init_module);
2467 module_exit(atl1_exit_module);