]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/net/atlx/atl1.c
atl1: print debug info if rrd error
[linux-2.6-omap-h63xx.git] / drivers / net / atlx / atl1.c
1 /*
2  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
4  * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5  *
6  * Derived from Intel e1000 driver
7  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License as published by the Free
11  * Software Foundation; either version 2 of the License, or (at your option)
12  * any later version.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along with
20  * this program; if not, write to the Free Software Foundation, Inc., 59
21  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
22  *
23  * The full GNU General Public License is included in this distribution in the
24  * file called COPYING.
25  *
26  * Contact Information:
27  * Xiong Huang <xiong_huang@attansic.com>
28  * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
29  * Xinzhu  302, TAIWAN, REPUBLIC OF CHINA
30  *
31  * Chris Snook <csnook@redhat.com>
32  * Jay Cliburn <jcliburn@gmail.com>
33  *
34  * This version is adapted from the Attansic reference driver for
35  * inclusion in the Linux kernel.  It is currently under heavy development.
36  * A very incomplete list of things that need to be dealt with:
37  *
38  * TODO:
39  * Wake on LAN.
40  * Add more ethtool functions.
41  * Fix abstruse irq enable/disable condition described here:
42  *      http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
43  *
44  * NEEDS TESTING:
45  * VLAN
46  * multicast
47  * promiscuous mode
48  * interrupt coalescing
49  * SMP torture testing
50  */
51
52 #include <asm/atomic.h>
53 #include <asm/byteorder.h>
54
55 #include <linux/compiler.h>
56 #include <linux/crc32.h>
57 #include <linux/delay.h>
58 #include <linux/dma-mapping.h>
59 #include <linux/etherdevice.h>
60 #include <linux/hardirq.h>
61 #include <linux/if_ether.h>
62 #include <linux/if_vlan.h>
63 #include <linux/in.h>
64 #include <linux/interrupt.h>
65 #include <linux/ip.h>
66 #include <linux/irqflags.h>
67 #include <linux/irqreturn.h>
68 #include <linux/jiffies.h>
69 #include <linux/mii.h>
70 #include <linux/module.h>
71 #include <linux/moduleparam.h>
72 #include <linux/net.h>
73 #include <linux/netdevice.h>
74 #include <linux/pci.h>
75 #include <linux/pci_ids.h>
76 #include <linux/pm.h>
77 #include <linux/skbuff.h>
78 #include <linux/slab.h>
79 #include <linux/spinlock.h>
80 #include <linux/string.h>
81 #include <linux/tcp.h>
82 #include <linux/timer.h>
83 #include <linux/types.h>
84 #include <linux/workqueue.h>
85
86 #include <net/checksum.h>
87
88 #include "atl1.h"
89
90 /* Temporary hack for merging atl1 and atl2 */
91 #include "atlx.c"
92
93 /*
94  * atl1_pci_tbl - PCI Device ID Table
95  */
96 static const struct pci_device_id atl1_pci_tbl[] = {
97         {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
98         /* required last entry */
99         {0,}
100 };
101 MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
102
103 static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
104         NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
105
106 static int debug = -1;
107 module_param(debug, int, 0);
108 MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
109
/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Always returns 0.
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* largest on-wire frame: MTU + ethernet header + FCS + VLAN tag */
	hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->wol = 0;
	/* rx buffer length is the frame size rounded up to 8 bytes */
	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
	adapter->ict = 50000;		/* interrupt clear timer: 100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	/*
	 * Default MAC/PHY/DMA tuning values.  NOTE(review): the raw
	 * constants below come from the Attansic reference driver and
	 * are hardware-specific; confirm against the L1 datasheet
	 * before changing any of them.
	 */
	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->ipgt = 0x60;
	hw->min_ifg = 0x50;
	hw->ipgr1 = 0x40;
	hw->ipgr2 = 0x60;
	hw->max_retry = 0xf;
	hw->lcol = 0x37;
	hw->jam_ipg = 7;
	hw->rfd_burst = 8;
	hw->rrd_burst = 8;
	hw->rfd_fetch_gap = 1;
	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
	hw->rx_jumbo_lkah = 1;
	hw->rrd_ret_timer = 16;
	hw->tpd_burst = 4;
	hw->tpd_fetch_th = 16;
	hw->txf_burst = 0x100;
	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
	hw->tpd_fetch_gap = 1;
	hw->rcb_value = atl1_rcb_64;
	hw->dma_ord = atl1_dma_ord_enh;
	hw->dmar_block = atl1_dma_req_256;
	hw->dmaw_block = atl1_dma_req_256;
	hw->cmb_rrd = 4;
	hw->cmb_tpd = 4;
	hw->cmb_rx_timer = 1;	/* about 2us */
	hw->cmb_tx_timer = 1;	/* about 2us */
	hw->smb_timer = 100000;	/* about 200ms */

	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->mb_lock);

	return 0;
}
167
168 static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
169 {
170         struct atl1_adapter *adapter = netdev_priv(netdev);
171         u16 result;
172
173         atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
174
175         return result;
176 }
177
/*
 * mdio_write - MII write callback for the generic MII layer
 * @netdev: network interface device structure
 * @phy_id: ignored; the L1 MAC talks to its single internal PHY
 * @reg_num: PHY register number
 * @val: value to write
 *
 * NOTE(review): unlike mdio_read(), reg_num is not masked with 0x1f
 * here -- presumably callers only pass valid 5-bit MII registers;
 * confirm before relying on wider values.
 */
static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
	int val)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_write_phy_reg(&adapter->hw, reg_num, val);
}
185
186 /*
187  * atl1_mii_ioctl -
188  * @netdev:
189  * @ifreq:
190  * @cmd:
191  */
192 static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
193 {
194         struct atl1_adapter *adapter = netdev_priv(netdev);
195         unsigned long flags;
196         int retval;
197
198         if (!netif_running(netdev))
199                 return -EINVAL;
200
201         spin_lock_irqsave(&adapter->lock, flags);
202         retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
203         spin_unlock_irqrestore(&adapter->lock, flags);
204
205         return retval;
206 }
207
208 /*
209  * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
210  * @adapter: board private structure
211  *
212  * Return 0 on success, negative on failure
213  */
214 s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
215 {
216         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
217         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
218         struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
219         struct atl1_ring_header *ring_header = &adapter->ring_header;
220         struct pci_dev *pdev = adapter->pdev;
221         int size;
222         u8 offset = 0;
223
224         size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
225         tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
226         if (unlikely(!tpd_ring->buffer_info)) {
227                 if (netif_msg_drv(adapter))
228                         dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
229                                 size);
230                 goto err_nomem;
231         }
232         rfd_ring->buffer_info =
233                 (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
234
235         /*
236          * real ring DMA buffer
237          * each ring/block may need up to 8 bytes for alignment, hence the
238          * additional 40 bytes tacked onto the end.
239          */
240         ring_header->size = size =
241                 sizeof(struct tx_packet_desc) * tpd_ring->count
242                 + sizeof(struct rx_free_desc) * rfd_ring->count
243                 + sizeof(struct rx_return_desc) * rrd_ring->count
244                 + sizeof(struct coals_msg_block)
245                 + sizeof(struct stats_msg_block)
246                 + 40;
247
248         ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
249                 &ring_header->dma);
250         if (unlikely(!ring_header->desc)) {
251                 if (netif_msg_drv(adapter))
252                         dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
253                 goto err_nomem;
254         }
255
256         memset(ring_header->desc, 0, ring_header->size);
257
258         /* init TPD ring */
259         tpd_ring->dma = ring_header->dma;
260         offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
261         tpd_ring->dma += offset;
262         tpd_ring->desc = (u8 *) ring_header->desc + offset;
263         tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
264
265         /* init RFD ring */
266         rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
267         offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
268         rfd_ring->dma += offset;
269         rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
270         rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
271
272
273         /* init RRD ring */
274         rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
275         offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
276         rrd_ring->dma += offset;
277         rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
278         rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
279
280
281         /* init CMB */
282         adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
283         offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
284         adapter->cmb.dma += offset;
285         adapter->cmb.cmb = (struct coals_msg_block *)
286                 ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));
287
288         /* init SMB */
289         adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
290         offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
291         adapter->smb.dma += offset;
292         adapter->smb.smb = (struct stats_msg_block *)
293                 ((u8 *) adapter->cmb.cmb +
294                 (sizeof(struct coals_msg_block) + offset));
295
296         return 0;
297
298 err_nomem:
299         kfree(tpd_ring->buffer_info);
300         return -ENOMEM;
301 }
302
/*
 * atl1_init_ring_ptrs - reset the software producer/consumer ring indices
 * @adapter: board private structure
 *
 * Does not touch descriptor memory; the hardware mailbox is programmed
 * from these same indices in atl1_configure(), keeping both sides in
 * agreement.
 */
static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
318
/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 *
 * Unmaps and frees every posted rx buffer, zeroes the bookkeeping
 * array and the RFD descriptor ring, and resets the RFD/RRD indices.
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rfd_ring->count; i++) {
		buffer_info = &rfd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * rfd_ring->count;
	memset(rfd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rfd_ring->desc, 0, rfd_ring->size);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}
358
359 /*
360  * atl1_clean_tx_ring - Free Tx Buffers
361  * @adapter: board private structure
362  */
363 static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
364 {
365         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
366         struct atl1_buffer *buffer_info;
367         struct pci_dev *pdev = adapter->pdev;
368         unsigned long size;
369         unsigned int i;
370
371         /* Free all the Tx ring sk_buffs */
372         for (i = 0; i < tpd_ring->count; i++) {
373                 buffer_info = &tpd_ring->buffer_info[i];
374                 if (buffer_info->dma) {
375                         pci_unmap_page(pdev, buffer_info->dma,
376                                 buffer_info->length, PCI_DMA_TODEVICE);
377                         buffer_info->dma = 0;
378                 }
379         }
380
381         for (i = 0; i < tpd_ring->count; i++) {
382                 buffer_info = &tpd_ring->buffer_info[i];
383                 if (buffer_info->skb) {
384                         dev_kfree_skb_any(buffer_info->skb);
385                         buffer_info->skb = NULL;
386                 }
387         }
388
389         size = sizeof(struct atl1_buffer) * tpd_ring->count;
390         memset(tpd_ring->buffer_info, 0, size);
391
392         /* Zero out the descriptor ring */
393         memset(tpd_ring->desc, 0, tpd_ring->size);
394
395         atomic_set(&tpd_ring->next_to_use, 0);
396         atomic_set(&tpd_ring->next_to_clean, 0);
397 }
398
/*
 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);

	/* tpd_ring->buffer_info is the single kzalloc'd array shared with
	 * rfd_ring, and all descriptor rings/blocks live inside the one
	 * coherent ring_header allocation (see atl1_setup_ring_resources) */
	kfree(tpd_ring->buffer_info);
	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
		ring_header->dma);

	tpd_ring->buffer_info = NULL;
	tpd_ring->desc = NULL;
	tpd_ring->dma = 0;

	/* rfd_ring->buffer_info pointed into the array freed above */
	rfd_ring->buffer_info = NULL;
	rfd_ring->desc = NULL;
	rfd_ring->dma = 0;

	rrd_ring->desc = NULL;
	rrd_ring->dma = 0;
}
431
432 static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
433 {
434         u32 value;
435         struct atl1_hw *hw = &adapter->hw;
436         struct net_device *netdev = adapter->netdev;
437         /* Config MAC CTRL Register */
438         value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
439         /* duplex */
440         if (FULL_DUPLEX == adapter->link_duplex)
441                 value |= MAC_CTRL_DUPLX;
442         /* speed */
443         value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
444                          MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
445                   MAC_CTRL_SPEED_SHIFT);
446         /* flow control */
447         value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
448         /* PAD & CRC */
449         value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
450         /* preamble length */
451         value |= (((u32) adapter->hw.preamble_len
452                    & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
453         /* vlan */
454         if (adapter->vlgrp)
455                 value |= MAC_CTRL_RMV_VLAN;
456         /* rx checksum
457            if (adapter->rx_csum)
458            value |= MAC_CTRL_RX_CHKSUM_EN;
459          */
460         /* filter mode */
461         value |= MAC_CTRL_BC_EN;
462         if (netdev->flags & IFF_PROMISC)
463                 value |= MAC_CTRL_PROMIS_EN;
464         else if (netdev->flags & IFF_ALLMULTI)
465                 value |= MAC_CTRL_MC_ALL_EN;
466         /* value |= MAC_CTRL_LOOPBACK; */
467         iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
468 }
469
/*
 * atl1_check_link - poll PHY link state and reconcile it with the MAC
 * @adapter: board private structure
 *
 * On link down: drops the carrier and stops the tx queue.  On link up:
 * if the negotiated speed/duplex matches the configured media type, the
 * MAC is (re)programmed via atl1_setup_mac_ctrl() and the queue woken;
 * otherwise, for fixed media types the PHY is forced directly, and for
 * autoneg media types phy_config_timer is armed to re-run negotiation.
 *
 * Returns 0, or the error from atl1_get_speed_and_duplex().
 */
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must read twice: the link-status bit is latched, so the
	 * first read may report a stale link-down event */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	/* does the negotiated result match the configured media type? */
	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		if (adapter->link_speed != speed
		    || adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev,
					"%s link is up %d Mbps %s\n",
					netdev->name, adapter->link_speed,
					adapter->link_duplex == FULL_DUPLEX ?
					"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	/* fixed (non-autoneg) media type: force the PHY to it directly */
	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				   MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
	}

	return 0;
}
582
583 /*
584  * atl1_change_mtu - Change the Maximum Transfer Unit
585  * @netdev: network interface device structure
586  * @new_mtu: new value for maximum frame size
587  *
588  * Returns 0 on success, negative on failure
589  */
590 static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
591 {
592         struct atl1_adapter *adapter = netdev_priv(netdev);
593         int old_mtu = netdev->mtu;
594         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
595
596         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
597             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
598                 if (netif_msg_link(adapter))
599                         dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
600                 return -EINVAL;
601         }
602
603         adapter->hw.max_frame_size = max_frame;
604         adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
605         adapter->rx_buffer_len = (max_frame + 7) & ~7;
606         adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
607
608         netdev->mtu = new_mtu;
609         if ((old_mtu != new_mtu) && netif_running(netdev)) {
610                 atl1_down(adapter);
611                 atl1_up(adapter);
612         }
613
614         return 0;
615 }
616
/*
 * set_flow_ctrl_old - program rx pause thresholds for older silicon
 * @adapter: board private structure
 *
 * Used for dev_rev 0x8001/0x9001/0x9002/0x9003 (see atl1_configure());
 * thresholds are derived from the RFD/RRD ring sizes.
 *
 * NOTE(review): for the RFD block 'hi' is count/16 and 'lo' is 7/8 of
 * the count -- the reverse of the RRD block below.  Presumably
 * intentional for this hardware; confirm against the L1 datasheet
 * before changing.
 */
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
	u32 hi, lo, value;

	/* RFD Flow Control */
	value = adapter->rfd_ring.count;
	hi = value / 16;
	if (hi < 2)
		hi = 2;
	lo = value * 7 / 8;

	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = adapter->rrd_ring.count;
	lo = value / 16;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
642
/*
 * set_flow_ctrl_new - program rx pause thresholds for newer silicon
 * @hw: hardware structure
 *
 * Unlike set_flow_ctrl_old(), thresholds are derived from the actual
 * SRAM RXF/RRD lengths read back from the chip, with minimum floors
 * and the guarantee that hi > lo.
 */
static void set_flow_ctrl_new(struct atl1_hw *hw)
{
	u32 hi, lo, value;

	/* RXF Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
	lo = value / 16;
	if (lo < 192)
		lo = 192;
	hi = value * 7 / 8;
	if (hi < lo)
		hi = lo + 16;
	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
	lo = value / 8;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	if (hi < lo)
		hi = lo + 3;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
671
/*
 * atl1_configure - Configure Transmit&Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx /Rx unit of the MAC after a reset.
 * Returns 0 on success, or 1 if a PHY link-down interrupt was raised
 * while configuring (config failed).
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	u32 value;

	/* clear interrupt status */
	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

	/* set MAC Address; the two registers take the bytes in reversed
	 * (big-endian) order: bytes 2..5 first, then bytes 0..1 */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) |
		(((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

	/* tx / rx ring */

	/* HI base address -- a single high-dword register is shared by all
	 * rings; all rings live in one coherent allocation so the upper
	 * 32 bits are the same (see atl1_setup_ring_resources()) */
	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
	/* LO base address */
	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_SMB_ADDR_LO);

	/* element count: RRD count in the upper 16 bits, RFD in the lower */
	value = adapter->rrd_ring.count;
	value <<= 16;
	value += adapter->rfd_ring.count;
	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
		REG_DESC_TPD_RING_SIZE);

	/* Load Ptr */
	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

	/* config Mailbox: publish the software ring indices to hardware */
	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
		  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
		((atomic_read(&adapter->rrd_ring.next_to_clean)
		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		((atomic_read(&adapter->rfd_ring.next_to_use)
		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAILBOX);

	/* config IPG/IFG (inter-packet / inter-frame gaps from sw_init) */
	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
		 << MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
		<< MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
		<< MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
		<< MAC_IPG_IFG_IPGR2_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

	/* config  Half-Duplex Control */
	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

	/* set Interrupt Moderator Timer */
	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

	/* set Interrupt Clear Timer */
	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

	/* set max frame size hw will accept */
	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

	/* jumbo size & rrd retirement timer */
	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
		 << RXQ_JMBOSZ_TH_SHIFT) |
		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
		<< RXQ_JMBO_LKAH_SHIFT) |
		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
		<< RXQ_RRD_TIMER_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

	/* Flow Control: threshold programming differs by silicon revision */
	switch (hw->dev_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		set_flow_ctrl_old(adapter);
		break;
	default:
		set_flow_ctrl_new(hw);
		break;
	}

	/* config TXQ */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
		<< TX_JUMBO_TASK_TH_SHIFT) |
		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

	/* config CMB / SMB */
	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
		hw->cmb_tpd : adapter->tpd_ring.count;
	value <<= 16;
	value |= hw->cmb_rrd;
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

	/* --- enable CMB / SMB */
	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

	/* a PHY link-down interrupt during configuration means failure */
	value = ioread32(adapter->hw.hw_addr + REG_ISR);
	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
		value = 1;	/* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
	return value;
}
848
849 /*
850  * atl1_pcie_patch - Patch for PCIE module
851  */
852 static void atl1_pcie_patch(struct atl1_adapter *adapter)
853 {
854         u32 value;
855
856         /* much vendor magic here */
857         value = 0x6500;
858         iowrite32(value, adapter->hw.hw_addr + 0x12FC);
859         /* pcie flow control mode change */
860         value = ioread32(adapter->hw.hw_addr + 0x1008);
861         value |= 0x8000;
862         iowrite32(value, adapter->hw.hw_addr + 0x1008);
863 }
864
865 /*
866  * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
867  * on PCI Command register is disable.
868  * The function enable this bit.
869  * Brackett, 2006/03/15
870  */
871 static void atl1_via_workaround(struct atl1_adapter *adapter)
872 {
873         unsigned long value;
874
875         value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
876         if (value & PCI_COMMAND_INTX_DISABLE)
877                 value &= ~PCI_COMMAND_INTX_DISABLE;
878         iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
879 }
880
881 static void atl1_inc_smb(struct atl1_adapter *adapter)
882 {
883         struct stats_msg_block *smb = adapter->smb.smb;
884
885         /* Fill out the OS statistics structure */
886         adapter->soft_stats.rx_packets += smb->rx_ok;
887         adapter->soft_stats.tx_packets += smb->tx_ok;
888         adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
889         adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
890         adapter->soft_stats.multicast += smb->rx_mcast;
891         adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
892                 smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
893
894         /* Rx Errors */
895         adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
896                 smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
897                 smb->rx_rrd_ov + smb->rx_align_err);
898         adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
899         adapter->soft_stats.rx_length_errors += smb->rx_len_err;
900         adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
901         adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
902         adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
903                 smb->rx_rxf_ov);
904
905         adapter->soft_stats.rx_pause += smb->rx_pause;
906         adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
907         adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
908
909         /* Tx Errors */
910         adapter->soft_stats.tx_errors += (smb->tx_late_col +
911                 smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
912         adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
913         adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
914         adapter->soft_stats.tx_window_errors += smb->tx_late_col;
915
916         adapter->soft_stats.excecol += smb->tx_abort_col;
917         adapter->soft_stats.deffer += smb->tx_defer;
918         adapter->soft_stats.scc += smb->tx_1_col;
919         adapter->soft_stats.mcc += smb->tx_2_col;
920         adapter->soft_stats.latecol += smb->tx_late_col;
921         adapter->soft_stats.tx_underun += smb->tx_underrun;
922         adapter->soft_stats.tx_trunc += smb->tx_trunc;
923         adapter->soft_stats.tx_pause += smb->tx_pause;
924
925         adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
926         adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
927         adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
928         adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
929         adapter->net_stats.multicast = adapter->soft_stats.multicast;
930         adapter->net_stats.collisions = adapter->soft_stats.collisions;
931         adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
932         adapter->net_stats.rx_over_errors =
933                 adapter->soft_stats.rx_missed_errors;
934         adapter->net_stats.rx_length_errors =
935                 adapter->soft_stats.rx_length_errors;
936         adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
937         adapter->net_stats.rx_frame_errors =
938                 adapter->soft_stats.rx_frame_errors;
939         adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
940         adapter->net_stats.rx_missed_errors =
941                 adapter->soft_stats.rx_missed_errors;
942         adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
943         adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
944         adapter->net_stats.tx_aborted_errors =
945                 adapter->soft_stats.tx_aborted_errors;
946         adapter->net_stats.tx_window_errors =
947                 adapter->soft_stats.tx_window_errors;
948         adapter->net_stats.tx_carrier_errors =
949                 adapter->soft_stats.tx_carrier_errors;
950 }
951
952 static void atl1_update_mailbox(struct atl1_adapter *adapter)
953 {
954         unsigned long flags;
955         u32 tpd_next_to_use;
956         u32 rfd_next_to_use;
957         u32 rrd_next_to_clean;
958         u32 value;
959
960         spin_lock_irqsave(&adapter->mb_lock, flags);
961
962         tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
963         rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
964         rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
965
966         value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
967                 MB_RFD_PROD_INDX_SHIFT) |
968                 ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
969                 MB_RRD_CONS_INDX_SHIFT) |
970                 ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
971                 MB_TPD_PROD_INDX_SHIFT);
972         iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
973
974         spin_unlock_irqrestore(&adapter->mb_lock, flags);
975 }
976
977 static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
978         struct rx_return_desc *rrd, u16 offset)
979 {
980         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
981
982         while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
983                 rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
984                 if (++rfd_ring->next_to_clean == rfd_ring->count) {
985                         rfd_ring->next_to_clean = 0;
986                 }
987         }
988 }
989
990 static void atl1_update_rfd_index(struct atl1_adapter *adapter,
991         struct rx_return_desc *rrd)
992 {
993         u16 num_buf;
994
995         num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
996                 adapter->rx_buffer_len;
997         if (rrd->num_buf == num_buf)
998                 /* clean alloc flag for bad rrd */
999                 atl1_clean_alloc_flag(adapter, rrd, num_buf);
1000 }
1001
/*
 * atl1_rx_checksum - set skb checksum state from RRD hardware flags
 * @adapter: board private structure
 * @rrd: receive return descriptor for this packet
 * @skb: the received packet
 *
 * Defaults to CHECKSUM_NONE (software verification).  Marks the skb
 * CHECKSUM_UNNECESSARY only for IPv4 packets the hardware validated.
 */
static void atl1_rx_checksum(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, struct sk_buff *skb)
{
	struct pci_dev *pdev = adapter->pdev;

	/* default: let the stack verify the checksum in software */
	skb->ip_summed = CHECKSUM_NONE;

	/* hard receive errors: count them and leave CHECKSUM_NONE */
	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
					ERR_FLAG_CODE | ERR_FLAG_OV)) {
			adapter->hw_csum_err++;
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &pdev->dev,
					"rx checksum error\n");
			return;
		}
	}

	/* not IPv4 */
	if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
		/* checksum is invalid, but it's not an IPv4 pkt, so ok */
		return;

	/* IPv4 packet */
	if (likely(!(rrd->err_flg &
		(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_good++;
		return;
	}

	/* IPv4, but hardware thinks its checksum is wrong */
	if (netif_msg_rx_err(adapter))
		dev_printk(KERN_DEBUG, &pdev->dev,
			"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
			rrd->pkt_flg, rrd->err_flg);
	/*
	 * NOTE(review): CHECKSUM_COMPLETE is supposed to carry a 32-bit
	 * partial checksum of the whole packet; storing htons() of the
	 * hardware's 16-bit value here looks suspect -- confirm against
	 * the L1 datasheet / later upstream revisions of this driver.
	 */
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
	adapter->hw_csum_err++;
	return;
}
1043
/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 *
 * Refills free RFD ring entries with freshly allocated skbs, mapping
 * each for DMA and writing its address/length into the descriptor.
 * Returns the number of entries processed (0 if none were free or the
 * first allocation failed).
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct page *page;
	unsigned long offset;
	struct atl1_buffer *buffer_info, *next_info;
	struct sk_buff *skb;
	u16 num_alloc = 0;
	u16 rfd_next_to_use, next_next;
	struct rx_free_desc *rfd_desc;

	/*
	 * Track both the current slot and the one after it; the loop
	 * stops when either is still marked alloced, which leaves one
	 * slot unfilled (presumably to keep producer != consumer --
	 * TODO confirm against the ring ownership scheme).
	 */
	next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
	if (++next_next == rfd_ring->count)
		next_next = 0;
	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
	next_info = &rfd_ring->buffer_info[next_next];

	while (!buffer_info->alloced && !next_info->alloced) {
		/* slot already holds an unmapped skb: just re-mark it */
		if (buffer_info->skb) {
			buffer_info->alloced = 1;
			goto next;
		}

		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->net_stats.rx_dropped++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->alloced = 1;
		buffer_info->skb = skb;
		buffer_info->length = (u16) adapter->rx_buffer_len;
		/* map the skb data area page for device-to-host DMA */
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(pdev, page, offset,
						adapter->rx_buffer_len,
						PCI_DMA_FROMDEVICE);
		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
		rfd_desc->coalese = 0;

next:
		/* advance both cursors with wraparound */
		rfd_next_to_use = next_next;
		if (unlikely(++next_next == rfd_ring->count))
			next_next = 0;

		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
		next_info = &rfd_ring->buffer_info[next_next];
		num_alloc++;
	}

	if (num_alloc) {
		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
	}
	return num_alloc;
}
1122
/*
 * atl1_intr_rx - reap completed receive return descriptors
 * @adapter: board private structure
 *
 * Walks the RRD ring from next_to_clean, handing good packets to the
 * stack (with VLAN acceleration when enabled), dropping bad ones, then
 * refills the RFD ring and updates the hardware mailbox if anything
 * was consumed.
 */
static void atl1_intr_rx(struct atl1_adapter *adapter)
{
	int i, count;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	count = 0;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (1) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		/* i counts the remaining re-check attempts for this rrd */
		i = 1;
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;
			else if (netif_msg_rx_err(adapter)) {
				/* dump the suspicious rrd for diagnosis */
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"unexpected RRD buffer count\n");
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"rx_buf_len = %d\n",
					adapter->rx_buffer_len);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD num_buf = %d\n",
					rrd->num_buf);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_len = %d\n",
					rrd->xsz.xsum_sz.pkt_size);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_flg = 0x%08X\n",
					rrd->pkt_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD err_flg = 0x%08X\n",
					rrd->err_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD vlan_tag = 0x%08X\n",
					rrd->vlan_tag);
			}

			/* rrd seems to be bad */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely */
				udelay(1);
				goto chk_rrd;
			}
			/* bad rrd */
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"bad RRD\n");
			/* see if update RFD index */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {	/* current rrd still not be updated */

			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		/* errored packets that are not checksum/length errors
		 * are dropped without going up the stack */
		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't need upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_FROMDEVICE);
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		/* strip the trailing Ethernet FCS */
		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
			/* rearrange hardware tag bits into 802.1Q order */
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
					((rrd->vlan_tag & 7) << 13) |
					((rrd->vlan_tag & 8) << 9);
			vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
		} else
			netif_rx(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;

		adapter->netdev->last_rx = jiffies;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update mailbox ? */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;

		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
		    atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
		    atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}
1272
1273 static void atl1_intr_tx(struct atl1_adapter *adapter)
1274 {
1275         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1276         struct atl1_buffer *buffer_info;
1277         u16 sw_tpd_next_to_clean;
1278         u16 cmb_tpd_next_to_clean;
1279
1280         sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1281         cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
1282
1283         while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
1284                 struct tx_packet_desc *tpd;
1285
1286                 tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
1287                 buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
1288                 if (buffer_info->dma) {
1289                         pci_unmap_page(adapter->pdev, buffer_info->dma,
1290                                        buffer_info->length, PCI_DMA_TODEVICE);
1291                         buffer_info->dma = 0;
1292                 }
1293
1294                 if (buffer_info->skb) {
1295                         dev_kfree_skb_irq(buffer_info->skb);
1296                         buffer_info->skb = NULL;
1297                 }
1298
1299                 if (++sw_tpd_next_to_clean == tpd_ring->count)
1300                         sw_tpd_next_to_clean = 0;
1301         }
1302         atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
1303
1304         if (netif_queue_stopped(adapter->netdev)
1305             && netif_carrier_ok(adapter->netdev))
1306                 netif_wake_queue(adapter->netdev);
1307 }
1308
1309 static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
1310 {
1311         u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1312         u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
1313         return ((next_to_clean > next_to_use) ?
1314                 next_to_clean - next_to_use - 1 :
1315                 tpd_ring->count + next_to_clean - next_to_use - 1);
1316 }
1317
1318 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
1319         struct tx_packet_desc *ptpd)
1320 {
1321         /* spinlock held */
1322         u8 hdr_len, ip_off;
1323         u32 real_len;
1324         int err;
1325
1326         if (skb_shinfo(skb)->gso_size) {
1327                 if (skb_header_cloned(skb)) {
1328                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1329                         if (unlikely(err))
1330                                 return -1;
1331                 }
1332
1333                 if (skb->protocol == ntohs(ETH_P_IP)) {
1334                         struct iphdr *iph = ip_hdr(skb);
1335
1336                         real_len = (((unsigned char *)iph - skb->data) +
1337                                 ntohs(iph->tot_len));
1338                         if (real_len < skb->len)
1339                                 pskb_trim(skb, real_len);
1340                         hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1341                         if (skb->len == hdr_len) {
1342                                 iph->check = 0;
1343                                 tcp_hdr(skb)->check =
1344                                         ~csum_tcpudp_magic(iph->saddr,
1345                                         iph->daddr, tcp_hdrlen(skb),
1346                                         IPPROTO_TCP, 0);
1347                                 ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
1348                                         TPD_IPHL_SHIFT;
1349                                 ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
1350                                         TPD_TCPHDRLEN_MASK) <<
1351                                         TPD_TCPHDRLEN_SHIFT;
1352                                 ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
1353                                 ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
1354                                 return 1;
1355                         }
1356
1357                         iph->check = 0;
1358                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1359                                         iph->daddr, 0, IPPROTO_TCP, 0);
1360                         ip_off = (unsigned char *)iph -
1361                                 (unsigned char *) skb_network_header(skb);
1362                         if (ip_off == 8) /* 802.3-SNAP frame */
1363                                 ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
1364                         else if (ip_off != 0)
1365                                 return -2;
1366
1367                         ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
1368                                 TPD_IPHL_SHIFT;
1369                         ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
1370                                 TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
1371                         ptpd->word3 |= (skb_shinfo(skb)->gso_size &
1372                                 TPD_MSS_MASK) << TPD_MSS_SHIFT;
1373                         ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
1374                         return 3;
1375                 }
1376         }
1377         return false;
1378 }
1379
1380 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
1381         struct tx_packet_desc *ptpd)
1382 {
1383         u8 css, cso;
1384
1385         if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1386                 css = (u8) (skb->csum_start - skb_headroom(skb));
1387                 cso = css + (u8) skb->csum_offset;
1388                 if (unlikely(css & 0x1)) {
1389                         /* L1 hardware requires an even number here */
1390                         if (netif_msg_tx_err(adapter))
1391                                 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1392                                         "payload offset not an even number\n");
1393                         return -1;
1394                 }
1395                 ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
1396                         TPD_PLOADOFFSET_SHIFT;
1397                 ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
1398                         TPD_CCSUMOFFSET_SHIFT;
1399                 ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
1400                 return true;
1401         }
1402         return 0;
1403 }
1404
1405 static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
1406         struct tx_packet_desc *ptpd)
1407 {
1408         /* spinlock held */
1409         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1410         struct atl1_buffer *buffer_info;
1411         u16 buf_len = skb->len;
1412         struct page *page;
1413         unsigned long offset;
1414         unsigned int nr_frags;
1415         unsigned int f;
1416         int retval;
1417         u16 next_to_use;
1418         u16 data_len;
1419         u8 hdr_len;
1420
1421         buf_len -= skb->data_len;
1422         nr_frags = skb_shinfo(skb)->nr_frags;
1423         next_to_use = atomic_read(&tpd_ring->next_to_use);
1424         buffer_info = &tpd_ring->buffer_info[next_to_use];
1425         if (unlikely(buffer_info->skb))
1426                 BUG();
1427         /* put skb in last TPD */
1428         buffer_info->skb = NULL;
1429
1430         retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
1431         if (retval) {
1432                 /* TSO */
1433                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1434                 buffer_info->length = hdr_len;
1435                 page = virt_to_page(skb->data);
1436                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1437                 buffer_info->dma = pci_map_page(adapter->pdev, page,
1438                                                 offset, hdr_len,
1439                                                 PCI_DMA_TODEVICE);
1440
1441                 if (++next_to_use == tpd_ring->count)
1442                         next_to_use = 0;
1443
1444                 if (buf_len > hdr_len) {
1445                         int i, nseg;
1446
1447                         data_len = buf_len - hdr_len;
1448                         nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
1449                                 ATL1_MAX_TX_BUF_LEN;
1450                         for (i = 0; i < nseg; i++) {
1451                                 buffer_info =
1452                                     &tpd_ring->buffer_info[next_to_use];
1453                                 buffer_info->skb = NULL;
1454                                 buffer_info->length =
1455                                     (ATL1_MAX_TX_BUF_LEN >=
1456                                      data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
1457                                 data_len -= buffer_info->length;
1458                                 page = virt_to_page(skb->data +
1459                                         (hdr_len + i * ATL1_MAX_TX_BUF_LEN));
1460                                 offset = (unsigned long)(skb->data +
1461                                         (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
1462                                         ~PAGE_MASK;
1463                                 buffer_info->dma = pci_map_page(adapter->pdev,
1464                                         page, offset, buffer_info->length,
1465                                         PCI_DMA_TODEVICE);
1466                                 if (++next_to_use == tpd_ring->count)
1467                                         next_to_use = 0;
1468                         }
1469                 }
1470         } else {
1471                 /* not TSO */
1472                 buffer_info->length = buf_len;
1473                 page = virt_to_page(skb->data);
1474                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1475                 buffer_info->dma = pci_map_page(adapter->pdev, page,
1476                         offset, buf_len, PCI_DMA_TODEVICE);
1477                 if (++next_to_use == tpd_ring->count)
1478                         next_to_use = 0;
1479         }
1480
1481         for (f = 0; f < nr_frags; f++) {
1482                 struct skb_frag_struct *frag;
1483                 u16 i, nseg;
1484
1485                 frag = &skb_shinfo(skb)->frags[f];
1486                 buf_len = frag->size;
1487
1488                 nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
1489                         ATL1_MAX_TX_BUF_LEN;
1490                 for (i = 0; i < nseg; i++) {
1491                         buffer_info = &tpd_ring->buffer_info[next_to_use];
1492                         if (unlikely(buffer_info->skb))
1493                                 BUG();
1494                         buffer_info->skb = NULL;
1495                         buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
1496                                 ATL1_MAX_TX_BUF_LEN : buf_len;
1497                         buf_len -= buffer_info->length;
1498                         buffer_info->dma = pci_map_page(adapter->pdev,
1499                                 frag->page,
1500                                 frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
1501                                 buffer_info->length, PCI_DMA_TODEVICE);
1502
1503                         if (++next_to_use == tpd_ring->count)
1504                                 next_to_use = 0;
1505                 }
1506         }
1507
1508         /* last tpd's buffer-info */
1509         buffer_info->skb = skb;
1510 }
1511
/*
 * atl1_tx_queue - copy the prototype TPD into @count consecutive ring slots
 * @adapter: board private structure
 * @count: number of descriptors (DMA buffers) this packet occupies
 * @ptpd: prototype descriptor already carrying checksum/TSO/VLAN bits
 *
 * Caller holds adapter->lock.  Each slot receives the per-buffer DMA
 * address/length prepared by atl1_tx_map(); the final slot is marked EOP.
 */
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
       struct tx_packet_desc *ptpd)
{
        /* spinlock held */
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_buffer *buffer_info;
        struct tx_packet_desc *tpd;
        u16 j;
        u32 val;
        u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);

        for (j = 0; j < count; j++) {
                buffer_info = &tpd_ring->buffer_info[next_to_use];
                tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
                /* the first slot may be the prototype itself; skip the copy */
                if (tpd != ptpd)
                        memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
                tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
                tpd->word2 = (cpu_to_le16(buffer_info->length) &
                        TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;

                /*
                 * if this is the first packet in a TSO chain, set
                 * TPD_HDRFLAG, otherwise, clear it.
                 */
                val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
                        TPD_SEGMENT_EN_MASK;
                if (val) {
                        if (!j)
                                tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
                        else
                                tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
                }

                /* mark end-of-packet on the last descriptor of the chain */
                if (j == (count - 1))
                        tpd->word3 |= 1 << TPD_EOP_SHIFT;

                if (++next_to_use == tpd_ring->count)
                        next_to_use = 0;
        }
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();

        atomic_set(&tpd_ring->next_to_use, next_to_use);
}
1561
1562 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1563 {
1564         struct atl1_adapter *adapter = netdev_priv(netdev);
1565         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1566         int len = skb->len;
1567         int tso;
1568         int count = 1;
1569         int ret_val;
1570         struct tx_packet_desc *ptpd;
1571         u16 frag_size;
1572         u16 vlan_tag;
1573         unsigned long flags;
1574         unsigned int nr_frags = 0;
1575         unsigned int mss = 0;
1576         unsigned int f;
1577         unsigned int proto_hdr_len;
1578
1579         len -= skb->data_len;
1580
1581         if (unlikely(skb->len <= 0)) {
1582                 dev_kfree_skb_any(skb);
1583                 return NETDEV_TX_OK;
1584         }
1585
1586         nr_frags = skb_shinfo(skb)->nr_frags;
1587         for (f = 0; f < nr_frags; f++) {
1588                 frag_size = skb_shinfo(skb)->frags[f].size;
1589                 if (frag_size)
1590                         count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
1591                                 ATL1_MAX_TX_BUF_LEN;
1592         }
1593
1594         mss = skb_shinfo(skb)->gso_size;
1595         if (mss) {
1596                 if (skb->protocol == ntohs(ETH_P_IP)) {
1597                         proto_hdr_len = (skb_transport_offset(skb) +
1598                                          tcp_hdrlen(skb));
1599                         if (unlikely(proto_hdr_len > len)) {
1600                                 dev_kfree_skb_any(skb);
1601                                 return NETDEV_TX_OK;
1602                         }
1603                         /* need additional TPD ? */
1604                         if (proto_hdr_len != len)
1605                                 count += (len - proto_hdr_len +
1606                                         ATL1_MAX_TX_BUF_LEN - 1) /
1607                                         ATL1_MAX_TX_BUF_LEN;
1608                 }
1609         }
1610
1611         if (!spin_trylock_irqsave(&adapter->lock, flags)) {
1612                 /* Can't get lock - tell upper layer to requeue */
1613                 if (netif_msg_tx_queued(adapter))
1614                         dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1615                                 "tx locked\n");
1616                 return NETDEV_TX_LOCKED;
1617         }
1618
1619         if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
1620                 /* not enough descriptors */
1621                 netif_stop_queue(netdev);
1622                 spin_unlock_irqrestore(&adapter->lock, flags);
1623                 if (netif_msg_tx_queued(adapter))
1624                         dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1625                                 "tx busy\n");
1626                 return NETDEV_TX_BUSY;
1627         }
1628
1629         ptpd = ATL1_TPD_DESC(tpd_ring,
1630                 (u16) atomic_read(&tpd_ring->next_to_use));
1631         memset(ptpd, 0, sizeof(struct tx_packet_desc));
1632
1633         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1634                 vlan_tag = vlan_tx_tag_get(skb);
1635                 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
1636                         ((vlan_tag >> 9) & 0x8);
1637                 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
1638                 ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
1639                         TPD_VL_TAGGED_SHIFT;
1640         }
1641
1642         tso = atl1_tso(adapter, skb, ptpd);
1643         if (tso < 0) {
1644                 spin_unlock_irqrestore(&adapter->lock, flags);
1645                 dev_kfree_skb_any(skb);
1646                 return NETDEV_TX_OK;
1647         }
1648
1649         if (!tso) {
1650                 ret_val = atl1_tx_csum(adapter, skb, ptpd);
1651                 if (ret_val < 0) {
1652                         spin_unlock_irqrestore(&adapter->lock, flags);
1653                         dev_kfree_skb_any(skb);
1654                         return NETDEV_TX_OK;
1655                 }
1656         }
1657
1658         atl1_tx_map(adapter, skb, ptpd);
1659         atl1_tx_queue(adapter, count, ptpd);
1660         atl1_update_mailbox(adapter);
1661         spin_unlock_irqrestore(&adapter->lock, flags);
1662         netdev->trans_start = jiffies;
1663         return NETDEV_TX_OK;
1664 }
1665
/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Returns IRQ_NONE when the coalescing message block (CMB) shows no
 * pending status, IRQ_HANDLED otherwise.  Loops (bounded by max_ints)
 * as long as new status keeps arriving in the CMB.
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
        struct atl1_adapter *adapter = netdev_priv(data);
        u32 status;
        u8 update_rx;
        int max_ints = 10;

        /* interrupt status is DMA'd by the chip into the CMB */
        status = adapter->cmb.cmb->int_stats;
        if (!status)
                return IRQ_NONE;

        /* NOTE(review): update_rx is set but never read in this function */
        update_rx = 0;

        do {
                /* clear CMB interrupt status at once */
                adapter->cmb.cmb->int_stats = 0;

                if (status & ISR_GPHY)  /* clear phy status */
                        atlx_clear_phy_int(adapter);

                /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
                iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

                /* check if SMB intr */
                if (status & ISR_SMB)
                        atl1_inc_smb(adapter);

                /* check if PCIE PHY Link down */
                if (status & ISR_PHY_LINKDOWN) {
                        if (netif_msg_intr(adapter))
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "pcie phy link down %x\n", status);
                        if (netif_running(adapter->netdev)) {   /* reset MAC */
                                /* mask all interrupts, recover via worker */
                                iowrite32(0, adapter->hw.hw_addr + REG_IMR);
                                schedule_work(&adapter->pcie_dma_to_rst_task);
                                return IRQ_HANDLED;
                        }
                }

                /* check if DMA read/write error ? */
                if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
                        if (netif_msg_intr(adapter))
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "pcie DMA r/w error (status = 0x%x)\n",
                                        status);
                        /* mask all interrupts, recover via worker */
                        iowrite32(0, adapter->hw.hw_addr + REG_IMR);
                        schedule_work(&adapter->pcie_dma_to_rst_task);
                        return IRQ_HANDLED;
                }

                /* link event */
                if (status & ISR_GPHY) {
                        adapter->soft_stats.tx_carrier_errors++;
                        atl1_check_for_link(adapter);
                }

                /* transmit event */
                if (status & ISR_CMB_TX)
                        atl1_intr_tx(adapter);

                /* rx exception */
                if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
                        ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
                        ISR_HOST_RRD_OV | ISR_CMB_RX))) {
                        if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
                                ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
                                ISR_HOST_RRD_OV))
                                if (netif_msg_intr(adapter))
                                        dev_printk(KERN_DEBUG,
                                                &adapter->pdev->dev,
                                                "rx exception, ISR = 0x%x\n",
                                                status);
                        atl1_intr_rx(adapter);
                }

                /* bound the loop in case the hardware never goes quiet */
                if (--max_ints < 0)
                        break;

        } while ((status = adapter->cmb.cmb->int_stats));

        /* re-enable Interrupt */
        iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
        return IRQ_HANDLED;
}
1756
1757 /*
1758  * atl1_watchdog - Timer Call-back
1759  * @data: pointer to netdev cast into an unsigned long
1760  */
1761 static void atl1_watchdog(unsigned long data)
1762 {
1763         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1764
1765         /* Reset the timer */
1766         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1767 }
1768
/*
 * atl1_phy_config - Timer Call-back
 * @data: pointer to the adapter cast into an unsigned long
 *
 * Reprograms the PHY advertisement registers from the values cached
 * in the hw struct and restarts autonegotiation.  Runs in timer
 * (softirq) context, hence the irqsave locking.
 */
static void atl1_phy_config(unsigned long data)
{
        struct atl1_adapter *adapter = (struct atl1_adapter *)data;
        struct atl1_hw *hw = &adapter->hw;
        unsigned long flags;

        spin_lock_irqsave(&adapter->lock, flags);
        adapter->phy_timer_pending = false;
        atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
        atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
        /* reset the PHY and (re)start autonegotiation */
        atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
        spin_unlock_irqrestore(&adapter->lock, flags);
}
1786
/*
 * Orphaned vendor comment left intact here:
 * <vendor comment>
 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
 * will assert. We do soft reset <0x1400=1> according
 * with the SPEC. BUT, it seemes that PCIE or DMA
 * state-machine will not be reset. DMAR_TO_INT will
 * assert again and again.
 * </vendor comment>
 */
/*
 * atl1_tx_timeout_task - recover the adapter with a full down/up cycle
 * @work: embedded work_struct; also used for pcie_dma_to_rst_task
 *        (both work items are initialized to this handler in probe)
 *
 * Runs in process context via the shared workqueue.
 */
static void atl1_tx_timeout_task(struct work_struct *work)
{
        struct atl1_adapter *adapter =
                container_of(work, struct atl1_adapter, tx_timeout_task);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);
        atl1_down(adapter);
        atl1_up(adapter);
        netif_device_attach(netdev);
}
1808
1809 int atl1_reset(struct atl1_adapter *adapter)
1810 {
1811         int ret;
1812         ret = atl1_reset_hw(&adapter->hw);
1813         if (ret)
1814                 return ret;
1815         return atl1_init_hw(&adapter->hw);
1816 }
1817
/*
 * atl1_up - bring the interface fully up after a hardware reset
 *
 * Reloads multicast filters, ring pointers, VLAN state and RX buffers,
 * configures the MAC, requests the interrupt (MSI when available, else
 * a shared legacy IRQ) and starts the watchdog and link check.
 * Returns 0 on success or a negative errno.
 */
s32 atl1_up(struct atl1_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;
        int irq_flags = IRQF_SAMPLE_RANDOM;

        /* hardware has been reset, we need to reload some things */
        atlx_set_multi(netdev);
        atl1_init_ring_ptrs(adapter);
        atlx_restore_vlan(adapter);
        /* presumably returns the number of RX buffers allocated, so zero
         * means total failure — TODO confirm against atl1_alloc_rx_buffers */
        err = atl1_alloc_rx_buffers(adapter);
        if (unlikely(!err))
                /* no RX BUFFER allocated */
                return -ENOMEM;

        if (unlikely(atl1_configure(adapter))) {
                err = -EIO;
                goto err_up;
        }

        err = pci_enable_msi(adapter->pdev);
        if (err) {
                if (netif_msg_ifup(adapter))
                        dev_info(&adapter->pdev->dev,
                                "Unable to enable MSI: %d\n", err);
                /* fall back to a (possibly shared) legacy interrupt */
                irq_flags |= IRQF_SHARED;
        }

        err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
                        netdev->name, netdev);
        if (unlikely(err))
                goto err_up;

        mod_timer(&adapter->watchdog_timer, jiffies);
        atlx_irq_enable(adapter);
        atl1_check_link(adapter);
        return 0;

err_up:
        pci_disable_msi(adapter->pdev);
        /* free rx_buffers */
        atl1_clean_rx_ring(adapter);
        return err;
}
1862
/*
 * atl1_down - take the interface down and quiesce the hardware
 *
 * Counterpart of atl1_up(): stops the timers, disables and frees the
 * IRQ, resets the MAC and releases TX/RX ring buffers.  Must run in
 * process context (del_timer_sync/free_irq may sleep).
 */
void atl1_down(struct atl1_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_config_timer);
        adapter->phy_timer_pending = false;

        atlx_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);
        pci_disable_msi(adapter->pdev);
        atl1_reset_hw(&adapter->hw);
        /* discard any interrupt status left in the coalescing msg block */
        adapter->cmb.cmb->int_stats = 0;

        adapter->link_speed = SPEED_0;
        adapter->link_duplex = -1;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        atl1_clean_tx_ring(adapter);
        atl1_clean_rx_ring(adapter);
}
1885
/*
 * atl1_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Allocates the descriptor rings and brings the adapter up; on a
 * bring-up failure the hardware is reset back to a sane state.
 *
 * Returns 0 on success, negative value on failure.
 */
static int atl1_open(struct net_device *netdev)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        int err = atl1_setup_ring_resources(adapter);

        if (err)
                return err;

        err = atl1_up(adapter);
        if (!err)
                return 0;

        /* bring-up failed: put the hardware back into a known state */
        atl1_reset(adapter);
        return err;
}
1918
/*
 * atl1_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1_close(struct net_device *netdev)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        /* stop the adapter (resets the MAC), then free the rings */
        atl1_down(adapter);
        atl1_free_ring_resources(adapter);
        return 0;
}
1937
1938 #ifdef CONFIG_PM
/*
 * atl1_suspend - prepare the device for system suspend
 * @pdev: PCI device
 * @state: target system sleep state (unused; device goes to D3hot)
 *
 * Stops the interface, then either arms wake-on-LAN (magic packet,
 * link change, multicast/broadcast receive) or powers the WOL logic
 * off, before saving PCI state and entering D3hot.
 */
static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter = netdev_priv(netdev);
        struct atl1_hw *hw = &adapter->hw;
        u32 ctrl = 0;
        u32 wufc = adapter->wol;

        netif_device_detach(netdev);
        if (netif_running(netdev))
                atl1_down(adapter);

        /* BMSR's link-status bit is latched; read twice so ctrl
         * reflects the current link state */
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
        if (ctrl & BMSR_LSTATUS)
                /* link currently up: drop the link-change wake condition */
                wufc &= ~ATLX_WUFC_LNKC;

        /* reduce speed to 10/100M */
        if (wufc) {
                atl1_phy_enter_power_saving(hw);
                /* if resume, let driver to re- setup link */
                hw->phy_configured = false;
                atl1_set_mac_addr(hw);
                atlx_set_multi(netdev);

                ctrl = 0;
                /* turn on magic packet wol */
                if (wufc & ATLX_WUFC_MAG)
                        ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;

                /* turn on Link change WOL */
                if (wufc & ATLX_WUFC_LNKC)
                        ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
                iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);

                /* turn on all-multi mode if wake on multicast is enabled */
                ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
                ctrl &= ~MAC_CTRL_DBG;
                ctrl &= ~MAC_CTRL_PROMIS_EN;
                if (wufc & ATLX_WUFC_MC)
                        ctrl |= MAC_CTRL_MC_ALL_EN;
                else
                        ctrl &= ~MAC_CTRL_MC_ALL_EN;

                /* turn on broadcast mode if wake on-BC is enabled */
                if (wufc & ATLX_WUFC_BC)
                        ctrl |= MAC_CTRL_BC_EN;
                else
                        ctrl &= ~MAC_CTRL_BC_EN;

                /* enable RX */
                ctrl |= MAC_CTRL_RX_EN;
                iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
                pci_enable_wake(pdev, PCI_D3hot, 1);
                pci_enable_wake(pdev, PCI_D3cold, 1);
        } else {
                /* nothing to wake on: power WOL logic off entirely */
                iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);

        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}
2007
2008 static int atl1_resume(struct pci_dev *pdev)
2009 {
2010         struct net_device *netdev = pci_get_drvdata(pdev);
2011         struct atl1_adapter *adapter = netdev_priv(netdev);
2012         u32 err;
2013
2014         pci_set_power_state(pdev, PCI_D0);
2015         pci_restore_state(pdev);
2016
2017         /* FIXME: check and handle */
2018         err = pci_enable_device(pdev);
2019         pci_enable_wake(pdev, PCI_D3hot, 0);
2020         pci_enable_wake(pdev, PCI_D3cold, 0);
2021
2022         iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
2023         atl1_reset(adapter);
2024
2025         if (netif_running(netdev))
2026                 atl1_up(adapter);
2027         netif_device_attach(netdev);
2028
2029         atl1_via_workaround(adapter);
2030
2031         return 0;
2032 }
2033 #else
2034 #define atl1_suspend NULL
2035 #define atl1_resume NULL
2036 #endif
2037
2038 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * atl1_poll_controller - netpoll/netconsole hook
 * @netdev: network interface device structure
 *
 * Runs the interrupt handler with the IRQ line masked.
 * NOTE(review): this uses netdev->irq while the rest of the driver
 * requests/frees adapter->pdev->irq — confirm netdev->irq is actually
 * assigned, otherwise the wrong IRQ is disabled here.
 */
static void atl1_poll_controller(struct net_device *netdev)
{
        disable_irq(netdev->irq);
        atl1_intr(netdev->irq, netdev);
        enable_irq(netdev->irq);
}
2045 #endif
2046
/*
 * atl1_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.  Error paths unwind in reverse order via
 * the labels at the bottom.
 */
static int __devinit atl1_probe(struct pci_dev *pdev,
        const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct atl1_adapter *adapter;
        static int cards_found = 0;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        /*
         * The atl1 chip can DMA to 64-bit addresses, but it uses a single
         * shared register for the high 32 bits, so only a single, aligned,
         * 4 GB physical address range can be used at a time.
         *
         * Supporting 64-bit DMA on this hardware is more trouble than it's
         * worth.  It is far easier to limit to 32-bit DMA than update
         * various kernel subsystems to support the mechanics required by a
         * fixed-high-32-bit system.
         */
        err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
        if (err) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto err_dma;
        }
        /*
         * Mark all PCI regions associated with PCI device
         * pdev as being reserved by owner atl1_driver_name
         */
        err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
        if (err)
                goto err_request_regions;

        /*
         * Enables bus-mastering on the device and calls
         * pcibios_set_master to do the needed arch specific settings
         */
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct atl1_adapter));
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }
        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
        adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);

        /* map BAR 0 (whole region: len 0 means "to the end") */
        adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
        if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_pci_iomap;
        }
        /* get device revision number */
        adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
                (REG_MASTER_CTRL + 2));
        if (netif_msg_probe(adapter))
                dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);

        /* set default ring resource counts */
        adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
        adapter->tpd_ring.count = ATL1_DEFAULT_TPD;

        /* hook up the generic MII library helpers */
        adapter->mii.dev = netdev;
        adapter->mii.mdio_read = mdio_read;
        adapter->mii.mdio_write = mdio_write;
        adapter->mii.phy_id_mask = 0x1f;
        adapter->mii.reg_num_mask = 0x1f;

        /* net_device method table */
        netdev->open = &atl1_open;
        netdev->stop = &atl1_close;
        netdev->hard_start_xmit = &atl1_xmit_frame;
        netdev->get_stats = &atlx_get_stats;
        netdev->set_multicast_list = &atlx_set_multi;
        netdev->set_mac_address = &atl1_set_mac;
        netdev->change_mtu = &atl1_change_mtu;
        netdev->do_ioctl = &atlx_ioctl;
        netdev->tx_timeout = &atlx_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = atl1_poll_controller;
#endif
        netdev->vlan_rx_register = atlx_vlan_rx_register;

        netdev->ethtool_ops = &atl1_ethtool_ops;
        adapter->bd_number = cards_found;

        /* setup the private structure */
        err = atl1_sw_init(adapter);
        if (err)
                goto err_common;

        netdev->features = NETIF_F_HW_CSUM;
        netdev->features |= NETIF_F_SG;
        netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
        netdev->features |= NETIF_F_TSO;
        /* LLTX: the driver does its own TX locking in atl1_xmit_frame() */
        netdev->features |= NETIF_F_LLTX;

        /*
         * patch for some L1 of old version,
         * the final version of L1 may not need these
         * patches
         */
        /* atl1_pcie_patch(adapter); */

        /* really reset GPHY core */
        iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);

        /*
         * reset the controller to
         * put the device in a known good starting state
         */
        if (atl1_reset_hw(&adapter->hw)) {
                err = -EIO;
                goto err_common;
        }

        /* copy the MAC address out of the EEPROM */
        atl1_read_mac_addr(&adapter->hw);
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                err = -EIO;
                goto err_common;
        }

        atl1_check_options(adapter);

        /* pre-init the MAC, and setup link */
        err = atl1_init_hw(&adapter->hw);
        if (err) {
                err = -EIO;
                goto err_common;
        }

        atl1_pcie_patch(adapter);
        /* assume we have no link for now */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &atl1_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;

        init_timer(&adapter->phy_config_timer);
        adapter->phy_config_timer.function = &atl1_phy_config;
        adapter->phy_config_timer.data = (unsigned long)adapter;
        adapter->phy_timer_pending = false;

        INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);

        INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);

        /* PCIE DMA errors are recovered with the same down/up cycle */
        INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);

        err = register_netdev(netdev);
        if (err)
                goto err_common;

        cards_found++;
        atl1_via_workaround(adapter);
        return 0;

err_common:
        pci_iounmap(pdev, adapter->hw.hw_addr);
err_pci_iomap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_dma:
err_request_regions:
        pci_disable_device(pdev);
        return err;
}
2239
/*
 * atl1_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit atl1_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter;
        /* Device not available. Return. */
        if (!netdev)
                return;

        adapter = netdev_priv(netdev);

        /*
         * Some atl1 boards lack persistent storage for their MAC, and get it
         * from the BIOS during POST.  If we've been messing with the MAC
         * address, we need to save the permanent one.
         */
        if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
                memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
                        ETH_ALEN);
                atl1_set_mac_addr(&adapter->hw);
        }

        /* disable the GPHY (same write probe uses to reset it) */
        iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
        unregister_netdev(netdev);
        pci_iounmap(pdev, adapter->hw.hw_addr);
        pci_release_regions(pdev);
        free_netdev(netdev);
        pci_disable_device(pdev);
}
2277
/* PCI driver glue; suspend/resume are NULL when CONFIG_PM is not set */
static struct pci_driver atl1_driver = {
        .name = ATLX_DRIVER_NAME,
        .id_table = atl1_pci_tbl,
        .probe = atl1_probe,
        .remove = __devexit_p(atl1_remove),
        .suspend = atl1_suspend,
        .resume = atl1_resume
};
2286
/*
 * atl1_exit_module - Driver Exit Cleanup Routine
 *
 * atl1_exit_module is called just before the driver is removed
 * from memory.  Unregistering the PCI driver triggers atl1_remove()
 * for every bound device.
 */
static void __exit atl1_exit_module(void)
{
        pci_unregister_driver(&atl1_driver);
}
2297
/*
 * atl1_init_module - Driver Registration Routine
 *
 * atl1_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init atl1_init_module(void)
{
        return pci_register_driver(&atl1_driver);
}
2308
/* standard module entry/exit hooks */
module_init(atl1_init_module);
module_exit(atl1_exit_module);
2311
/*
 * atl1_stats - describes one ethtool statistic
 * @stat_string: name reported to userspace
 * @sizeof_stat: size in bytes of the counter in struct atl1_adapter
 * @stat_offset: byte offset of the counter within struct atl1_adapter
 */
struct atl1_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
};
2317
/* expands to the size/offset pair for an adapter member, filling the
 * sizeof_stat and stat_offset fields of struct atl1_stats */
#define ATL1_STAT(m) \
        sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
2320
/*
 * Table driving ethtool -S output.  Note the stat names (including the
 * historical "tx_underun" spelling) are userspace-visible ABI and must
 * not be changed.
 */
static struct atl1_stats atl1_gstrings_stats[] = {
        {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
        {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
        {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
        {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
        {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
        {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
        {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
        {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
        {"multicast", ATL1_STAT(soft_stats.multicast)},
        {"collisions", ATL1_STAT(soft_stats.collisions)},
        {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
        {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
        {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
        {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
        {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
        {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
        {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
        {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
        {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
        {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
        {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
        {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
        {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
        {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
        {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
        {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
        {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
        {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
        {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
        {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
        {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
};
2354
2355 static void atl1_get_ethtool_stats(struct net_device *netdev,
2356         struct ethtool_stats *stats, u64 *data)
2357 {
2358         struct atl1_adapter *adapter = netdev_priv(netdev);
2359         int i;
2360         char *p;
2361
2362         for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2363                 p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
2364                 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
2365                         sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2366         }
2367
2368 }
2369
2370 static int atl1_get_sset_count(struct net_device *netdev, int sset)
2371 {
2372         switch (sset) {
2373         case ETH_SS_STATS:
2374                 return ARRAY_SIZE(atl1_gstrings_stats);
2375         default:
2376                 return -EOPNOTSUPP;
2377         }
2378 }
2379
2380 static int atl1_get_settings(struct net_device *netdev,
2381         struct ethtool_cmd *ecmd)
2382 {
2383         struct atl1_adapter *adapter = netdev_priv(netdev);
2384         struct atl1_hw *hw = &adapter->hw;
2385
2386         ecmd->supported = (SUPPORTED_10baseT_Half |
2387                            SUPPORTED_10baseT_Full |
2388                            SUPPORTED_100baseT_Half |
2389                            SUPPORTED_100baseT_Full |
2390                            SUPPORTED_1000baseT_Full |
2391                            SUPPORTED_Autoneg | SUPPORTED_TP);
2392         ecmd->advertising = ADVERTISED_TP;
2393         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2394             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2395                 ecmd->advertising |= ADVERTISED_Autoneg;
2396                 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
2397                         ecmd->advertising |= ADVERTISED_Autoneg;
2398                         ecmd->advertising |=
2399                             (ADVERTISED_10baseT_Half |
2400                              ADVERTISED_10baseT_Full |
2401                              ADVERTISED_100baseT_Half |
2402                              ADVERTISED_100baseT_Full |
2403                              ADVERTISED_1000baseT_Full);
2404                 } else
2405                         ecmd->advertising |= (ADVERTISED_1000baseT_Full);
2406         }
2407         ecmd->port = PORT_TP;
2408         ecmd->phy_address = 0;
2409         ecmd->transceiver = XCVR_INTERNAL;
2410
2411         if (netif_carrier_ok(adapter->netdev)) {
2412                 u16 link_speed, link_duplex;
2413                 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
2414                 ecmd->speed = link_speed;
2415                 if (link_duplex == FULL_DUPLEX)
2416                         ecmd->duplex = DUPLEX_FULL;
2417                 else
2418                         ecmd->duplex = DUPLEX_HALF;
2419         } else {
2420                 ecmd->speed = -1;
2421                 ecmd->duplex = -1;
2422         }
2423         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2424             hw->media_type == MEDIA_TYPE_1000M_FULL)
2425                 ecmd->autoneg = AUTONEG_ENABLE;
2426         else
2427                 ecmd->autoneg = AUTONEG_DISABLE;
2428
2429         return 0;
2430 }
2431
2432 static int atl1_set_settings(struct net_device *netdev,
2433         struct ethtool_cmd *ecmd)
2434 {
2435         struct atl1_adapter *adapter = netdev_priv(netdev);
2436         struct atl1_hw *hw = &adapter->hw;
2437         u16 phy_data;
2438         int ret_val = 0;
2439         u16 old_media_type = hw->media_type;
2440
2441         if (netif_running(adapter->netdev)) {
2442                 if (netif_msg_link(adapter))
2443                         dev_dbg(&adapter->pdev->dev,
2444                                 "ethtool shutting down adapter\n");
2445                 atl1_down(adapter);
2446         }
2447
2448         if (ecmd->autoneg == AUTONEG_ENABLE)
2449                 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
2450         else {
2451                 if (ecmd->speed == SPEED_1000) {
2452                         if (ecmd->duplex != DUPLEX_FULL) {
2453                                 if (netif_msg_link(adapter))
2454                                         dev_warn(&adapter->pdev->dev,
2455                                                 "1000M half is invalid\n");
2456                                 ret_val = -EINVAL;
2457                                 goto exit_sset;
2458                         }
2459                         hw->media_type = MEDIA_TYPE_1000M_FULL;
2460                 } else if (ecmd->speed == SPEED_100) {
2461                         if (ecmd->duplex == DUPLEX_FULL)
2462                                 hw->media_type = MEDIA_TYPE_100M_FULL;
2463                         else
2464                                 hw->media_type = MEDIA_TYPE_100M_HALF;
2465                 } else {
2466                         if (ecmd->duplex == DUPLEX_FULL)
2467                                 hw->media_type = MEDIA_TYPE_10M_FULL;
2468                         else
2469                                 hw->media_type = MEDIA_TYPE_10M_HALF;
2470                 }
2471         }
2472         switch (hw->media_type) {
2473         case MEDIA_TYPE_AUTO_SENSOR:
2474                 ecmd->advertising =
2475                     ADVERTISED_10baseT_Half |
2476                     ADVERTISED_10baseT_Full |
2477                     ADVERTISED_100baseT_Half |
2478                     ADVERTISED_100baseT_Full |
2479                     ADVERTISED_1000baseT_Full |
2480                     ADVERTISED_Autoneg | ADVERTISED_TP;
2481                 break;
2482         case MEDIA_TYPE_1000M_FULL:
2483                 ecmd->advertising =
2484                     ADVERTISED_1000baseT_Full |
2485                     ADVERTISED_Autoneg | ADVERTISED_TP;
2486                 break;
2487         default:
2488                 ecmd->advertising = 0;
2489                 break;
2490         }
2491         if (atl1_phy_setup_autoneg_adv(hw)) {
2492                 ret_val = -EINVAL;
2493                 if (netif_msg_link(adapter))
2494                         dev_warn(&adapter->pdev->dev,
2495                                 "invalid ethtool speed/duplex setting\n");
2496                 goto exit_sset;
2497         }
2498         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2499             hw->media_type == MEDIA_TYPE_1000M_FULL)
2500                 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2501         else {
2502                 switch (hw->media_type) {
2503                 case MEDIA_TYPE_100M_FULL:
2504                         phy_data =
2505                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
2506                             MII_CR_RESET;
2507                         break;
2508                 case MEDIA_TYPE_100M_HALF:
2509                         phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2510                         break;
2511                 case MEDIA_TYPE_10M_FULL:
2512                         phy_data =
2513                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
2514                         break;
2515                 default:
2516                         /* MEDIA_TYPE_10M_HALF: */
2517                         phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2518                         break;
2519                 }
2520         }
2521         atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2522 exit_sset:
2523         if (ret_val)
2524                 hw->media_type = old_media_type;
2525
2526         if (netif_running(adapter->netdev)) {
2527                 if (netif_msg_link(adapter))
2528                         dev_dbg(&adapter->pdev->dev,
2529                                 "ethtool starting adapter\n");
2530                 atl1_up(adapter);
2531         } else if (!ret_val) {
2532                 if (netif_msg_link(adapter))
2533                         dev_dbg(&adapter->pdev->dev,
2534                                 "ethtool resetting adapter\n");
2535                 atl1_reset(adapter);
2536         }
2537         return ret_val;
2538 }
2539
2540 static void atl1_get_drvinfo(struct net_device *netdev,
2541         struct ethtool_drvinfo *drvinfo)
2542 {
2543         struct atl1_adapter *adapter = netdev_priv(netdev);
2544
2545         strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
2546         strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
2547                 sizeof(drvinfo->version));
2548         strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2549         strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
2550                 sizeof(drvinfo->bus_info));
2551         drvinfo->eedump_len = ATL1_EEDUMP_LEN;
2552 }
2553
2554 static void atl1_get_wol(struct net_device *netdev,
2555         struct ethtool_wolinfo *wol)
2556 {
2557         struct atl1_adapter *adapter = netdev_priv(netdev);
2558
2559         wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
2560         wol->wolopts = 0;
2561         if (adapter->wol & ATLX_WUFC_EX)
2562                 wol->wolopts |= WAKE_UCAST;
2563         if (adapter->wol & ATLX_WUFC_MC)
2564                 wol->wolopts |= WAKE_MCAST;
2565         if (adapter->wol & ATLX_WUFC_BC)
2566                 wol->wolopts |= WAKE_BCAST;
2567         if (adapter->wol & ATLX_WUFC_MAG)
2568                 wol->wolopts |= WAKE_MAGIC;
2569         return;
2570 }
2571
2572 static int atl1_set_wol(struct net_device *netdev,
2573         struct ethtool_wolinfo *wol)
2574 {
2575         struct atl1_adapter *adapter = netdev_priv(netdev);
2576
2577         if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2578                 return -EOPNOTSUPP;
2579         adapter->wol = 0;
2580         if (wol->wolopts & WAKE_UCAST)
2581                 adapter->wol |= ATLX_WUFC_EX;
2582         if (wol->wolopts & WAKE_MCAST)
2583                 adapter->wol |= ATLX_WUFC_MC;
2584         if (wol->wolopts & WAKE_BCAST)
2585                 adapter->wol |= ATLX_WUFC_BC;
2586         if (wol->wolopts & WAKE_MAGIC)
2587                 adapter->wol |= ATLX_WUFC_MAG;
2588         return 0;
2589 }
2590
2591 static u32 atl1_get_msglevel(struct net_device *netdev)
2592 {
2593         struct atl1_adapter *adapter = netdev_priv(netdev);
2594         return adapter->msg_enable;
2595 }
2596
2597 static void atl1_set_msglevel(struct net_device *netdev, u32 value)
2598 {
2599         struct atl1_adapter *adapter = netdev_priv(netdev);
2600         adapter->msg_enable = value;
2601 }
2602
2603 static int atl1_get_regs_len(struct net_device *netdev)
2604 {
2605         return ATL1_REG_COUNT * sizeof(u32);
2606 }
2607
2608 static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2609         void *p)
2610 {
2611         struct atl1_adapter *adapter = netdev_priv(netdev);
2612         struct atl1_hw *hw = &adapter->hw;
2613         unsigned int i;
2614         u32 *regbuf = p;
2615
2616         for (i = 0; i < ATL1_REG_COUNT; i++) {
2617                 /*
2618                  * This switch statement avoids reserved regions
2619                  * of register space.
2620                  */
2621                 switch (i) {
2622                 case 6 ... 9:
2623                 case 14:
2624                 case 29 ... 31:
2625                 case 34 ... 63:
2626                 case 75 ... 127:
2627                 case 136 ... 1023:
2628                 case 1027 ... 1087:
2629                 case 1091 ... 1151:
2630                 case 1194 ... 1195:
2631                 case 1200 ... 1201:
2632                 case 1206 ... 1213:
2633                 case 1216 ... 1279:
2634                 case 1290 ... 1311:
2635                 case 1323 ... 1343:
2636                 case 1358 ... 1359:
2637                 case 1368 ... 1375:
2638                 case 1378 ... 1383:
2639                 case 1388 ... 1391:
2640                 case 1393 ... 1395:
2641                 case 1402 ... 1403:
2642                 case 1410 ... 1471:
2643                 case 1522 ... 1535:
2644                         /* reserved region; don't read it */
2645                         regbuf[i] = 0;
2646                         break;
2647                 default:
2648                         /* unreserved region */
2649                         regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
2650                 }
2651         }
2652 }
2653
2654 static void atl1_get_ringparam(struct net_device *netdev,
2655         struct ethtool_ringparam *ring)
2656 {
2657         struct atl1_adapter *adapter = netdev_priv(netdev);
2658         struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
2659         struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
2660
2661         ring->rx_max_pending = ATL1_MAX_RFD;
2662         ring->tx_max_pending = ATL1_MAX_TPD;
2663         ring->rx_mini_max_pending = 0;
2664         ring->rx_jumbo_max_pending = 0;
2665         ring->rx_pending = rxdr->count;
2666         ring->tx_pending = txdr->count;
2667         ring->rx_mini_pending = 0;
2668         ring->rx_jumbo_pending = 0;
2669 }
2670
2671 static int atl1_set_ringparam(struct net_device *netdev,
2672         struct ethtool_ringparam *ring)
2673 {
2674         struct atl1_adapter *adapter = netdev_priv(netdev);
2675         struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
2676         struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
2677         struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
2678
2679         struct atl1_tpd_ring tpd_old, tpd_new;
2680         struct atl1_rfd_ring rfd_old, rfd_new;
2681         struct atl1_rrd_ring rrd_old, rrd_new;
2682         struct atl1_ring_header rhdr_old, rhdr_new;
2683         int err;
2684
2685         tpd_old = adapter->tpd_ring;
2686         rfd_old = adapter->rfd_ring;
2687         rrd_old = adapter->rrd_ring;
2688         rhdr_old = adapter->ring_header;
2689
2690         if (netif_running(adapter->netdev))
2691                 atl1_down(adapter);
2692
2693         rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
2694         rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
2695                         rfdr->count;
2696         rfdr->count = (rfdr->count + 3) & ~3;
2697         rrdr->count = rfdr->count;
2698
2699         tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
2700         tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
2701                         tpdr->count;
2702         tpdr->count = (tpdr->count + 3) & ~3;
2703
2704         if (netif_running(adapter->netdev)) {
2705                 /* try to get new resources before deleting old */
2706                 err = atl1_setup_ring_resources(adapter);
2707                 if (err)
2708                         goto err_setup_ring;
2709
2710                 /*
2711                  * save the new, restore the old in order to free it,
2712                  * then restore the new back again
2713                  */
2714
2715                 rfd_new = adapter->rfd_ring;
2716                 rrd_new = adapter->rrd_ring;
2717                 tpd_new = adapter->tpd_ring;
2718                 rhdr_new = adapter->ring_header;
2719                 adapter->rfd_ring = rfd_old;
2720                 adapter->rrd_ring = rrd_old;
2721                 adapter->tpd_ring = tpd_old;
2722                 adapter->ring_header = rhdr_old;
2723                 atl1_free_ring_resources(adapter);
2724                 adapter->rfd_ring = rfd_new;
2725                 adapter->rrd_ring = rrd_new;
2726                 adapter->tpd_ring = tpd_new;
2727                 adapter->ring_header = rhdr_new;
2728
2729                 err = atl1_up(adapter);
2730                 if (err)
2731                         return err;
2732         }
2733         return 0;
2734
2735 err_setup_ring:
2736         adapter->rfd_ring = rfd_old;
2737         adapter->rrd_ring = rrd_old;
2738         adapter->tpd_ring = tpd_old;
2739         adapter->ring_header = rhdr_old;
2740         atl1_up(adapter);
2741         return err;
2742 }
2743
2744 static void atl1_get_pauseparam(struct net_device *netdev,
2745         struct ethtool_pauseparam *epause)
2746 {
2747         struct atl1_adapter *adapter = netdev_priv(netdev);
2748         struct atl1_hw *hw = &adapter->hw;
2749
2750         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2751             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2752                 epause->autoneg = AUTONEG_ENABLE;
2753         } else {
2754                 epause->autoneg = AUTONEG_DISABLE;
2755         }
2756         epause->rx_pause = 1;
2757         epause->tx_pause = 1;
2758 }
2759
2760 static int atl1_set_pauseparam(struct net_device *netdev,
2761         struct ethtool_pauseparam *epause)
2762 {
2763         struct atl1_adapter *adapter = netdev_priv(netdev);
2764         struct atl1_hw *hw = &adapter->hw;
2765
2766         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2767             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2768                 epause->autoneg = AUTONEG_ENABLE;
2769         } else {
2770                 epause->autoneg = AUTONEG_DISABLE;
2771         }
2772
2773         epause->rx_pause = 1;
2774         epause->tx_pause = 1;
2775
2776         return 0;
2777 }
2778
2779 /* FIXME: is this right? -- CHS */
2780 static u32 atl1_get_rx_csum(struct net_device *netdev)
2781 {
2782         return 1;
2783 }
2784
2785 static void atl1_get_strings(struct net_device *netdev, u32 stringset,
2786         u8 *data)
2787 {
2788         u8 *p = data;
2789         int i;
2790
2791         switch (stringset) {
2792         case ETH_SS_STATS:
2793                 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2794                         memcpy(p, atl1_gstrings_stats[i].stat_string,
2795                                 ETH_GSTRING_LEN);
2796                         p += ETH_GSTRING_LEN;
2797                 }
2798                 break;
2799         }
2800 }
2801
2802 static int atl1_nway_reset(struct net_device *netdev)
2803 {
2804         struct atl1_adapter *adapter = netdev_priv(netdev);
2805         struct atl1_hw *hw = &adapter->hw;
2806
2807         if (netif_running(netdev)) {
2808                 u16 phy_data;
2809                 atl1_down(adapter);
2810
2811                 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2812                         hw->media_type == MEDIA_TYPE_1000M_FULL) {
2813                         phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2814                 } else {
2815                         switch (hw->media_type) {
2816                         case MEDIA_TYPE_100M_FULL:
2817                                 phy_data = MII_CR_FULL_DUPLEX |
2818                                         MII_CR_SPEED_100 | MII_CR_RESET;
2819                                 break;
2820                         case MEDIA_TYPE_100M_HALF:
2821                                 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2822                                 break;
2823                         case MEDIA_TYPE_10M_FULL:
2824                                 phy_data = MII_CR_FULL_DUPLEX |
2825                                         MII_CR_SPEED_10 | MII_CR_RESET;
2826                                 break;
2827                         default:
2828                                 /* MEDIA_TYPE_10M_HALF */
2829                                 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2830                         }
2831                 }
2832                 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2833                 atl1_up(adapter);
2834         }
2835         return 0;
2836 }
2837
2838 const struct ethtool_ops atl1_ethtool_ops = {
2839         .get_settings           = atl1_get_settings,
2840         .set_settings           = atl1_set_settings,
2841         .get_drvinfo            = atl1_get_drvinfo,
2842         .get_wol                = atl1_get_wol,
2843         .set_wol                = atl1_set_wol,
2844         .get_msglevel           = atl1_get_msglevel,
2845         .set_msglevel           = atl1_set_msglevel,
2846         .get_regs_len           = atl1_get_regs_len,
2847         .get_regs               = atl1_get_regs,
2848         .get_ringparam          = atl1_get_ringparam,
2849         .set_ringparam          = atl1_set_ringparam,
2850         .get_pauseparam         = atl1_get_pauseparam,
2851         .set_pauseparam         = atl1_set_pauseparam,
2852         .get_rx_csum            = atl1_get_rx_csum,
2853         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
2854         .get_link               = ethtool_op_get_link,
2855         .set_sg                 = ethtool_op_set_sg,
2856         .get_strings            = atl1_get_strings,
2857         .nway_reset             = atl1_nway_reset,
2858         .get_ethtool_stats      = atl1_get_ethtool_stats,
2859         .get_sset_count         = atl1_get_sset_count,
2860         .set_tso                = ethtool_op_set_tso,
2861 };
2862
2863 /*
2864  * Reset the transmit and receive units; mask and clear all interrupts.
2865  * hw - Struct containing variables accessed by shared code
2866  * return : 0  or  idle status (if error)
2867  */
2868 s32 atl1_reset_hw(struct atl1_hw *hw)
2869 {
2870         struct pci_dev *pdev = hw->back->pdev;
2871         struct atl1_adapter *adapter = hw->back;
2872         u32 icr;
2873         int i;
2874
2875         /*
2876          * Clear Interrupt mask to stop board from generating
2877          * interrupts & Clear any pending interrupt events
2878          */
2879         /*
2880          * iowrite32(0, hw->hw_addr + REG_IMR);
2881          * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
2882          */
2883
2884         /*
2885          * Issue Soft Reset to the MAC.  This will reset the chip's
2886          * transmit, receive, DMA.  It will not effect
2887          * the current PCI configuration.  The global reset bit is self-
2888          * clearing, and should clear within a microsecond.
2889          */
2890         iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
2891         ioread32(hw->hw_addr + REG_MASTER_CTRL);
2892
2893         iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
2894         ioread16(hw->hw_addr + REG_PHY_ENABLE);
2895
2896         /* delay about 1ms */
2897         msleep(1);
2898
2899         /* Wait at least 10ms for All module to be Idle */
2900         for (i = 0; i < 10; i++) {
2901                 icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
2902                 if (!icr)
2903                         break;
2904                 /* delay 1 ms */
2905                 msleep(1);
2906                 /* FIXME: still the right way to do this? */
2907                 cpu_relax();
2908         }
2909
2910         if (icr) {
2911                 if (netif_msg_hw(adapter))
2912                         dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
2913                 return icr;
2914         }
2915
2916         return 0;
2917 }
2918
2919 /* function about EEPROM
2920  *
2921  * check_eeprom_exist
2922  * return 0 if eeprom exist
2923  */
2924 static int atl1_check_eeprom_exist(struct atl1_hw *hw)
2925 {
2926         u32 value;
2927         value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2928         if (value & SPI_FLASH_CTRL_EN_VPD) {
2929                 value &= ~SPI_FLASH_CTRL_EN_VPD;
2930                 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2931         }
2932
2933         value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
2934         return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2935 }
2936
2937 static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
2938 {
2939         int i;
2940         u32 control;
2941
2942         if (offset & 3)
2943                 /* address do not align */
2944                 return false;
2945
2946         iowrite32(0, hw->hw_addr + REG_VPD_DATA);
2947         control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2948         iowrite32(control, hw->hw_addr + REG_VPD_CAP);
2949         ioread32(hw->hw_addr + REG_VPD_CAP);
2950
2951         for (i = 0; i < 10; i++) {
2952                 msleep(2);
2953                 control = ioread32(hw->hw_addr + REG_VPD_CAP);
2954                 if (control & VPD_CAP_VPD_FLAG)
2955                         break;
2956         }
2957         if (control & VPD_CAP_VPD_FLAG) {
2958                 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
2959                 return true;
2960         }
2961         /* timeout */
2962         return false;
2963 }
2964
2965 /*
2966  * Reads the value from a PHY register
2967  * hw - Struct containing variables accessed by shared code
2968  * reg_addr - address of the PHY register to read
2969  */
2970 s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
2971 {
2972         u32 val;
2973         int i;
2974
2975         val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
2976                 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
2977                 MDIO_CLK_SEL_SHIFT;
2978         iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
2979         ioread32(hw->hw_addr + REG_MDIO_CTRL);
2980
2981         for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2982                 udelay(2);
2983                 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
2984                 if (!(val & (MDIO_START | MDIO_BUSY)))
2985                         break;
2986         }
2987         if (!(val & (MDIO_START | MDIO_BUSY))) {
2988                 *phy_data = (u16) val;
2989                 return 0;
2990         }
2991         return ATLX_ERR_PHY;
2992 }
2993
2994 #define CUSTOM_SPI_CS_SETUP     2
2995 #define CUSTOM_SPI_CLK_HI       2
2996 #define CUSTOM_SPI_CLK_LO       2
2997 #define CUSTOM_SPI_CS_HOLD      2
2998 #define CUSTOM_SPI_CS_HI        3
2999
3000 static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
3001 {
3002         int i;
3003         u32 value;
3004
3005         iowrite32(0, hw->hw_addr + REG_SPI_DATA);
3006         iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
3007
3008         value = SPI_FLASH_CTRL_WAIT_READY |
3009             (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
3010             SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
3011                                              SPI_FLASH_CTRL_CLK_HI_MASK) <<
3012             SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
3013                                            SPI_FLASH_CTRL_CLK_LO_MASK) <<
3014             SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
3015                                            SPI_FLASH_CTRL_CS_HOLD_MASK) <<
3016             SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
3017                                             SPI_FLASH_CTRL_CS_HI_MASK) <<
3018             SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
3019             SPI_FLASH_CTRL_INS_SHIFT;
3020
3021         iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
3022
3023         value |= SPI_FLASH_CTRL_START;
3024         iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
3025         ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
3026
3027         for (i = 0; i < 10; i++) {
3028                 msleep(1);
3029                 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
3030                 if (!(value & SPI_FLASH_CTRL_START))
3031                         break;
3032         }
3033
3034         if (value & SPI_FLASH_CTRL_START)
3035                 return false;
3036
3037         *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
3038
3039         return true;
3040 }
3041
3042 /*
3043  * get_permanent_address
3044  * return 0 if get valid mac address,
3045  */
3046 static int atl1_get_permanent_address(struct atl1_hw *hw)
3047 {
3048         u32 addr[2];
3049         u32 i, control;
3050         u16 reg;
3051         u8 eth_addr[ETH_ALEN];
3052         bool key_valid;
3053
3054         if (is_valid_ether_addr(hw->perm_mac_addr))
3055                 return 0;
3056
3057         /* init */
3058         addr[0] = addr[1] = 0;
3059
3060         if (!atl1_check_eeprom_exist(hw)) {
3061                 reg = 0;
3062                 key_valid = false;
3063                 /* Read out all EEPROM content */
3064                 i = 0;
3065                 while (1) {
3066                         if (atl1_read_eeprom(hw, i + 0x100, &control)) {
3067                                 if (key_valid) {
3068                                         if (reg == REG_MAC_STA_ADDR)
3069                                                 addr[0] = control;
3070                                         else if (reg == (REG_MAC_STA_ADDR + 4))
3071                                                 addr[1] = control;
3072                                         key_valid = false;
3073                                 } else if ((control & 0xff) == 0x5A) {
3074                                         key_valid = true;
3075                                         reg = (u16) (control >> 16);
3076                                 } else
3077                                         break;
3078                         } else
3079                                 /* read error */
3080                                 break;
3081                         i += 4;
3082                 }
3083
3084                 *(u32 *) &eth_addr[2] = swab32(addr[0]);
3085                 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3086                 if (is_valid_ether_addr(eth_addr)) {
3087                         memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3088                         return 0;
3089                 }
3090                 return 1;
3091         }
3092
3093         /* see if SPI FLAGS exist ? */
3094         addr[0] = addr[1] = 0;
3095         reg = 0;
3096         key_valid = false;
3097         i = 0;
3098         while (1) {
3099                 if (atl1_spi_read(hw, i + 0x1f000, &control)) {
3100                         if (key_valid) {
3101                                 if (reg == REG_MAC_STA_ADDR)
3102                                         addr[0] = control;
3103                                 else if (reg == (REG_MAC_STA_ADDR + 4))
3104                                         addr[1] = control;
3105                                 key_valid = false;
3106                         } else if ((control & 0xff) == 0x5A) {
3107                                 key_valid = true;
3108                                 reg = (u16) (control >> 16);
3109                         } else
3110                                 /* data end */
3111                                 break;
3112                 } else
3113                         /* read error */
3114                         break;
3115                 i += 4;
3116         }
3117
3118         *(u32 *) &eth_addr[2] = swab32(addr[0]);
3119         *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3120         if (is_valid_ether_addr(eth_addr)) {
3121                 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3122                 return 0;
3123         }
3124
3125         /*
3126          * On some motherboards, the MAC address is written by the
3127          * BIOS directly to the MAC register during POST, and is
3128          * not stored in eeprom.  If all else thus far has failed
3129          * to fetch the permanent MAC address, try reading it directly.
3130          */
3131         addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
3132         addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
3133         *(u32 *) &eth_addr[2] = swab32(addr[0]);
3134         *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3135         if (is_valid_ether_addr(eth_addr)) {
3136                 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3137                 return 0;
3138         }
3139
3140         return 1;
3141 }
3142
3143 /*
3144  * Reads the adapter's MAC address from the EEPROM
3145  * hw - Struct containing variables accessed by shared code
3146  */
3147 s32 atl1_read_mac_addr(struct atl1_hw *hw)
3148 {
3149         u16 i;
3150
3151         if (atl1_get_permanent_address(hw))
3152                 random_ether_addr(hw->perm_mac_addr);
3153
3154         for (i = 0; i < ETH_ALEN; i++)
3155                 hw->mac_addr[i] = hw->perm_mac_addr[i];
3156         return 0;
3157 }
3158
3159 /*
3160  * Hashes an address to determine its location in the multicast table
3161  * hw - Struct containing variables accessed by shared code
3162  * mc_addr - the multicast address to hash
3163  *
 * atl1_hash_mc_addr
 *  purpose
 *      Compute the hash value for a multicast address:
 *          1. calculate a 32-bit CRC of the multicast address
 *          2. bit-reverse the CRC (MSB becomes LSB)
3170  */
3171 u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
3172 {
3173         u32 crc32, value = 0;
3174         int i;
3175
3176         crc32 = ether_crc_le(6, mc_addr);
3177         for (i = 0; i < 32; i++)
3178                 value |= (((crc32 >> i) & 1) << (31 - i));
3179
3180         return value;
3181 }
3182
3183 /*
3184  * Sets the bit in the multicast table corresponding to the hash value.
3185  * hw - Struct containing variables accessed by shared code
3186  * hash_value - Multicast address hash value
3187  */
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The HASH Table is a register array of 2 32-bit registers.
	 * It is treated like an array of 64 bits.  We want to set
	 * bit BitArray[hash_value].  The register is selected by the
	 * most significant bit of the hash value (bit 31) and the bit
	 * within that register by the next five bits (bits 30:26).
	 * Read-modify-write so previously set hash bits are preserved.
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;
	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= (1 << hash_bit);
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}
3208
3209 /*
3210  * Writes a value to a PHY register
3211  * hw - Struct containing variables accessed by shared code
3212  * reg_addr - address of the PHY register to write
3213  * data - data to write to the PHY
3214  */
3215 s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
3216 {
3217         int i;
3218         u32 val;
3219
3220         val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
3221             (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
3222             MDIO_SUP_PREAMBLE |
3223             MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
3224         iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
3225         ioread32(hw->hw_addr + REG_MDIO_CTRL);
3226
3227         for (i = 0; i < MDIO_WAIT_TIMES; i++) {
3228                 udelay(2);
3229                 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3230                 if (!(val & (MDIO_START | MDIO_BUSY)))
3231                         break;
3232         }
3233
3234         if (!(val & (MDIO_START | MDIO_BUSY)))
3235                 return 0;
3236
3237         return ATLX_ERR_PHY;
3238 }
3239
3240 /*
 * Bring the L001 PHY out of its power-saving state (hardware bug
 * workaround)
 * hw - Struct containing variables accessed by shared code
 * At power-on the L001 PHY is always in power-saving state, which
 * forbids gigabit link.
3245  */
3246 static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
3247 {
3248         s32 ret;
3249         ret = atl1_write_phy_reg(hw, 29, 0x0029);
3250         if (ret)
3251                 return ret;
3252         return atl1_write_phy_reg(hw, 30, 0);
3253 }
3254
3255 /*
3256  *TODO: do something or get rid of this
3257  */
3258 s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
3259 {
3260 /*    s32 ret_val;
3261  *    u16 phy_data;
3262  */
3263
3264 /*
3265     ret_val = atl1_write_phy_reg(hw, ...);
3266     ret_val = atl1_write_phy_reg(hw, ...);
3267     ....
3268 */
3269         return 0;
3270 }
3271
3272 /*
 * Resets the PHY and applies the current configuration
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
3277  */
3278 static s32 atl1_phy_reset(struct atl1_hw *hw)
3279 {
3280         struct pci_dev *pdev = hw->back->pdev;
3281         struct atl1_adapter *adapter = hw->back;
3282         s32 ret_val;
3283         u16 phy_data;
3284
3285         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3286             hw->media_type == MEDIA_TYPE_1000M_FULL)
3287                 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3288         else {
3289                 switch (hw->media_type) {
3290                 case MEDIA_TYPE_100M_FULL:
3291                         phy_data =
3292                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
3293                             MII_CR_RESET;
3294                         break;
3295                 case MEDIA_TYPE_100M_HALF:
3296                         phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3297                         break;
3298                 case MEDIA_TYPE_10M_FULL:
3299                         phy_data =
3300                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
3301                         break;
3302                 default:
3303                         /* MEDIA_TYPE_10M_HALF: */
3304                         phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3305                         break;
3306                 }
3307         }
3308
3309         ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3310         if (ret_val) {
3311                 u32 val;
3312                 int i;
3313                 /* pcie serdes link may be down! */
3314                 if (netif_msg_hw(adapter))
3315                         dev_dbg(&pdev->dev, "pcie phy link down\n");
3316
3317                 for (i = 0; i < 25; i++) {
3318                         msleep(1);
3319                         val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3320                         if (!(val & (MDIO_START | MDIO_BUSY)))
3321                                 break;
3322                 }
3323
3324                 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
3325                         if (netif_msg_hw(adapter))
3326                                 dev_warn(&pdev->dev,
3327                                         "pcie link down at least 25ms\n");
3328                         return ret_val;
3329                 }
3330         }
3331         return 0;
3332 }
3333
3334 /*
3335  * Configures PHY autoneg and flow control advertisement settings
3336  * hw - Struct containing variables accessed by shared code
3337  */
3338 s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
3339 {
3340         s32 ret_val;
3341         s16 mii_autoneg_adv_reg;
3342         s16 mii_1000t_ctrl_reg;
3343
3344         /* Read the MII Auto-Neg Advertisement Register (Address 4). */
3345         mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
3346
3347         /* Read the MII 1000Base-T Control Register (Address 9). */
3348         mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
3349
3350         /*
3351          * First we clear all the 10/100 mb speed bits in the Auto-Neg
3352          * Advertisement Register (Address 4) and the 1000 mb speed bits in
3353          * the  1000Base-T Control Register (Address 9).
3354          */
3355         mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
3356         mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
3357
3358         /*
3359          * Need to parse media_type  and set up
3360          * the appropriate PHY registers.
3361          */
3362         switch (hw->media_type) {
3363         case MEDIA_TYPE_AUTO_SENSOR:
3364                 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
3365                                         MII_AR_10T_FD_CAPS |
3366                                         MII_AR_100TX_HD_CAPS |
3367                                         MII_AR_100TX_FD_CAPS);
3368                 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3369                 break;
3370
3371         case MEDIA_TYPE_1000M_FULL:
3372                 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3373                 break;
3374
3375         case MEDIA_TYPE_100M_FULL:
3376                 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
3377                 break;
3378
3379         case MEDIA_TYPE_100M_HALF:
3380                 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
3381                 break;
3382
3383         case MEDIA_TYPE_10M_FULL:
3384                 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
3385                 break;
3386
3387         default:
3388                 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
3389                 break;
3390         }
3391
3392         /* flow control fixed to enable all */
3393         mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
3394
3395         hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
3396         hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
3397
3398         ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
3399         if (ret_val)
3400                 return ret_val;
3401
3402         ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
3403         if (ret_val)
3404                 return ret_val;
3405
3406         return 0;
3407 }
3408
3409 /*
3410  * Configures link settings.
3411  * hw - Struct containing variables accessed by shared code
3412  * Assumes the hardware has previously been reset and the
3413  * transmitter and receiver are not enabled.
3414  */
3415 static s32 atl1_setup_link(struct atl1_hw *hw)
3416 {
3417         struct pci_dev *pdev = hw->back->pdev;
3418         struct atl1_adapter *adapter = hw->back;
3419         s32 ret_val;
3420
3421         /*
3422          * Options:
3423          *  PHY will advertise value(s) parsed from
3424          *  autoneg_advertised and fc
3425          *  no matter what autoneg is , We will not wait link result.
3426          */
3427         ret_val = atl1_phy_setup_autoneg_adv(hw);
3428         if (ret_val) {
3429                 if (netif_msg_link(adapter))
3430                         dev_dbg(&pdev->dev,
3431                                 "error setting up autonegotiation\n");
3432                 return ret_val;
3433         }
3434         /* SW.Reset , En-Auto-Neg if needed */
3435         ret_val = atl1_phy_reset(hw);
3436         if (ret_val) {
3437                 if (netif_msg_link(adapter))
3438                         dev_dbg(&pdev->dev, "error resetting phy\n");
3439                 return ret_val;
3440         }
3441         hw->phy_configured = true;
3442         return ret_val;
3443 }
3444
3445 static void atl1_init_flash_opcode(struct atl1_hw *hw)
3446 {
3447         if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
3448                 /* Atmel */
3449                 hw->flash_vendor = 0;
3450
3451         /* Init OP table */
3452         iowrite8(flash_table[hw->flash_vendor].cmd_program,
3453                 hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
3454         iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
3455                 hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
3456         iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
3457                 hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
3458         iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
3459                 hw->hw_addr + REG_SPI_FLASH_OP_RDID);
3460         iowrite8(flash_table[hw->flash_vendor].cmd_wren,
3461                 hw->hw_addr + REG_SPI_FLASH_OP_WREN);
3462         iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
3463                 hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
3464         iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
3465                 hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
3466         iowrite8(flash_table[hw->flash_vendor].cmd_read,
3467                 hw->hw_addr + REG_SPI_FLASH_OP_READ);
3468 }
3469
3470 /*
3471  * Performs basic configuration of the adapter.
3472  * hw - Struct containing variables accessed by shared code
3473  * Assumes that the controller has previously been reset and is in a
3474  * post-reset uninitialized state. Initializes multicast table,
3475  * and  Calls routines to setup link
3476  * Leaves the transmit and receive units disabled and uninitialized.
3477  */
3478 s32 atl1_init_hw(struct atl1_hw *hw)
3479 {
3480         u32 ret_val = 0;
3481
3482         /* Zero out the Multicast HASH table */
3483         iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
3484         /* clear the old settings from the multicast hash table */
3485         iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
3486
3487         atl1_init_flash_opcode(hw);
3488
3489         if (!hw->phy_configured) {
3490                 /* enable GPHY LinkChange Interrrupt */
3491                 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
3492                 if (ret_val)
3493                         return ret_val;
3494                 /* make PHY out of power-saving state */
3495                 ret_val = atl1_phy_leave_power_saving(hw);
3496                 if (ret_val)
3497                         return ret_val;
3498                 /* Call a subroutine to configure the link */
3499                 ret_val = atl1_setup_link(hw);
3500         }
3501         return ret_val;
3502 }
3503
3504 /*
3505  * Detects the current speed and duplex settings of the hardware.
3506  * hw - Struct containing variables accessed by shared code
3507  * speed - Speed of the connection
3508  * duplex - Duplex setting of the connection
3509  */
3510 s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
3511 {
3512         struct pci_dev *pdev = hw->back->pdev;
3513         struct atl1_adapter *adapter = hw->back;
3514         s32 ret_val;
3515         u16 phy_data;
3516
3517         /* ; --- Read   PHY Specific Status Register (17) */
3518         ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
3519         if (ret_val)
3520                 return ret_val;
3521
3522         if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
3523                 return ATLX_ERR_PHY_RES;
3524
3525         switch (phy_data & MII_ATLX_PSSR_SPEED) {
3526         case MII_ATLX_PSSR_1000MBS:
3527                 *speed = SPEED_1000;
3528                 break;
3529         case MII_ATLX_PSSR_100MBS:
3530                 *speed = SPEED_100;
3531                 break;
3532         case MII_ATLX_PSSR_10MBS:
3533                 *speed = SPEED_10;
3534                 break;
3535         default:
3536                 if (netif_msg_hw(adapter))
3537                         dev_dbg(&pdev->dev, "error getting speed\n");
3538                 return ATLX_ERR_PHY_SPEED;
3539                 break;
3540         }
3541         if (phy_data & MII_ATLX_PSSR_DPLX)
3542                 *duplex = FULL_DUPLEX;
3543         else
3544                 *duplex = HALF_DUPLEX;
3545
3546         return 0;
3547 }
3548
3549 void atl1_set_mac_addr(struct atl1_hw *hw)
3550 {
3551         u32 value;
3552         /*
3553          * 00-0B-6A-F6-00-DC
3554          * 0:  6AF600DC   1: 000B
3555          * low dword
3556          */
3557         value = (((u32) hw->mac_addr[2]) << 24) |
3558             (((u32) hw->mac_addr[3]) << 16) |
3559             (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
3560         iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
3561         /* high dword */
3562         value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
3563         iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
3564 }