www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/net/atlx/atl1.c
atl1: simplify tx packet descriptor
[linux-2.6-omap-h63xx.git] / drivers / net / atlx / atl1.c
1 /*
2  * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3  * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
4  * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5  *
6  * Derived from Intel e1000 driver
7  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License as published by the Free
11  * Software Foundation; either version 2 of the License, or (at your option)
12  * any later version.
13  *
14  * This program is distributed in the hope that it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along with
20  * this program; if not, write to the Free Software Foundation, Inc., 59
21  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
22  *
23  * The full GNU General Public License is included in this distribution in the
24  * file called COPYING.
25  *
26  * Contact Information:
27  * Xiong Huang <xiong_huang@attansic.com>
28  * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
29  * Xinzhu  302, TAIWAN, REPUBLIC OF CHINA
30  *
31  * Chris Snook <csnook@redhat.com>
32  * Jay Cliburn <jcliburn@gmail.com>
33  *
34  * This version is adapted from the Attansic reference driver for
35  * inclusion in the Linux kernel.  It is currently under heavy development.
36  * A very incomplete list of things that need to be dealt with:
37  *
38  * TODO:
39  * Wake on LAN.
40  * Add more ethtool functions.
41  * Fix abstruse irq enable/disable condition described here:
42  *      http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
43  *
44  * NEEDS TESTING:
45  * VLAN
46  * multicast
47  * promiscuous mode
48  * interrupt coalescing
49  * SMP torture testing
50  */
51
52 #include <asm/atomic.h>
53 #include <asm/byteorder.h>
54
55 #include <linux/compiler.h>
56 #include <linux/crc32.h>
57 #include <linux/delay.h>
58 #include <linux/dma-mapping.h>
59 #include <linux/etherdevice.h>
60 #include <linux/hardirq.h>
61 #include <linux/if_ether.h>
62 #include <linux/if_vlan.h>
63 #include <linux/in.h>
64 #include <linux/interrupt.h>
65 #include <linux/ip.h>
66 #include <linux/irqflags.h>
67 #include <linux/irqreturn.h>
68 #include <linux/jiffies.h>
69 #include <linux/mii.h>
70 #include <linux/module.h>
71 #include <linux/moduleparam.h>
72 #include <linux/net.h>
73 #include <linux/netdevice.h>
74 #include <linux/pci.h>
75 #include <linux/pci_ids.h>
76 #include <linux/pm.h>
77 #include <linux/skbuff.h>
78 #include <linux/slab.h>
79 #include <linux/spinlock.h>
80 #include <linux/string.h>
81 #include <linux/tcp.h>
82 #include <linux/timer.h>
83 #include <linux/types.h>
84 #include <linux/workqueue.h>
85
86 #include <net/checksum.h>
87
88 #include "atl1.h"
89
90 /* Temporary hack for merging atl1 and atl2 */
91 #include "atlx.c"
92
93 /*
94  * atl1_pci_tbl - PCI Device ID Table
95  */
static const struct pci_device_id atl1_pci_tbl[] = {
	/* Attansic L1 gigabit adapter — the only device this driver binds */
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
102
103 /*
104  * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
105  * @adapter: board private structure to initialize
106  *
107  * atl1_sw_init initializes the Adapter private data structure.
108  * Fields are initialized based on PCI device information and
109  * OS network device settings (MTU size).
110  */
111 static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
112 {
113         struct atl1_hw *hw = &adapter->hw;
114         struct net_device *netdev = adapter->netdev;
115
116         hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
117         hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
118
119         adapter->wol = 0;
120         adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
121         adapter->ict = 50000;           /* 100ms */
122         adapter->link_speed = SPEED_0;  /* hardware init */
123         adapter->link_duplex = FULL_DUPLEX;
124
125         hw->phy_configured = false;
126         hw->preamble_len = 7;
127         hw->ipgt = 0x60;
128         hw->min_ifg = 0x50;
129         hw->ipgr1 = 0x40;
130         hw->ipgr2 = 0x60;
131         hw->max_retry = 0xf;
132         hw->lcol = 0x37;
133         hw->jam_ipg = 7;
134         hw->rfd_burst = 8;
135         hw->rrd_burst = 8;
136         hw->rfd_fetch_gap = 1;
137         hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
138         hw->rx_jumbo_lkah = 1;
139         hw->rrd_ret_timer = 16;
140         hw->tpd_burst = 4;
141         hw->tpd_fetch_th = 16;
142         hw->txf_burst = 0x100;
143         hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
144         hw->tpd_fetch_gap = 1;
145         hw->rcb_value = atl1_rcb_64;
146         hw->dma_ord = atl1_dma_ord_enh;
147         hw->dmar_block = atl1_dma_req_256;
148         hw->dmaw_block = atl1_dma_req_256;
149         hw->cmb_rrd = 4;
150         hw->cmb_tpd = 4;
151         hw->cmb_rx_timer = 1;   /* about 2us */
152         hw->cmb_tx_timer = 1;   /* about 2us */
153         hw->smb_timer = 100000; /* about 200ms */
154
155         spin_lock_init(&adapter->lock);
156         spin_lock_init(&adapter->mb_lock);
157
158         return 0;
159 }
160
161 static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
162 {
163         struct atl1_adapter *adapter = netdev_priv(netdev);
164         u16 result;
165
166         atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
167
168         return result;
169 }
170
171 static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
172         int val)
173 {
174         struct atl1_adapter *adapter = netdev_priv(netdev);
175
176         atl1_write_phy_reg(&adapter->hw, reg_num, val);
177 }
178
179 /*
180  * atl1_mii_ioctl -
181  * @netdev:
182  * @ifreq:
183  * @cmd:
184  */
185 static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
186 {
187         struct atl1_adapter *adapter = netdev_priv(netdev);
188         unsigned long flags;
189         int retval;
190
191         if (!netif_running(netdev))
192                 return -EINVAL;
193
194         spin_lock_irqsave(&adapter->lock, flags);
195         retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
196         spin_unlock_irqrestore(&adapter->lock, flags);
197
198         return retval;
199 }
200
201 /*
202  * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
203  * @adapter: board private structure
204  *
205  * Return 0 on success, negative on failure
206  */
207 s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
208 {
209         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
210         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
211         struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
212         struct atl1_ring_header *ring_header = &adapter->ring_header;
213         struct pci_dev *pdev = adapter->pdev;
214         int size;
215         u8 offset = 0;
216
217         size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
218         tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
219         if (unlikely(!tpd_ring->buffer_info)) {
220                 dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size);
221                 goto err_nomem;
222         }
223         rfd_ring->buffer_info =
224                 (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
225
226         /*
227          * real ring DMA buffer
228          * each ring/block may need up to 8 bytes for alignment, hence the
229          * additional 40 bytes tacked onto the end.
230          */
231         ring_header->size = size =
232                 sizeof(struct tx_packet_desc) * tpd_ring->count
233                 + sizeof(struct rx_free_desc) * rfd_ring->count
234                 + sizeof(struct rx_return_desc) * rrd_ring->count
235                 + sizeof(struct coals_msg_block)
236                 + sizeof(struct stats_msg_block)
237                 + 40;
238
239         ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
240                 &ring_header->dma);
241         if (unlikely(!ring_header->desc)) {
242                 dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
243                 goto err_nomem;
244         }
245
246         memset(ring_header->desc, 0, ring_header->size);
247
248         /* init TPD ring */
249         tpd_ring->dma = ring_header->dma;
250         offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
251         tpd_ring->dma += offset;
252         tpd_ring->desc = (u8 *) ring_header->desc + offset;
253         tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
254
255         /* init RFD ring */
256         rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
257         offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
258         rfd_ring->dma += offset;
259         rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
260         rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
261
262
263         /* init RRD ring */
264         rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
265         offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
266         rrd_ring->dma += offset;
267         rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
268         rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
269
270
271         /* init CMB */
272         adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
273         offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
274         adapter->cmb.dma += offset;
275         adapter->cmb.cmb = (struct coals_msg_block *)
276                 ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));
277
278         /* init SMB */
279         adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
280         offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
281         adapter->smb.dma += offset;
282         adapter->smb.smb = (struct stats_msg_block *)
283                 ((u8 *) adapter->cmb.cmb +
284                 (sizeof(struct coals_msg_block) + offset));
285
286         return 0;
287
288 err_nomem:
289         kfree(tpd_ring->buffer_info);
290         return -ENOMEM;
291 }
292
293 static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
294 {
295         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
296         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
297         struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
298
299         atomic_set(&tpd_ring->next_to_use, 0);
300         atomic_set(&tpd_ring->next_to_clean, 0);
301
302         rfd_ring->next_to_clean = 0;
303         atomic_set(&rfd_ring->next_to_use, 0);
304
305         rrd_ring->next_to_use = 0;
306         atomic_set(&rrd_ring->next_to_clean, 0);
307 }
308
309 /*
310  * atl1_clean_rx_ring - Free RFD Buffers
311  * @adapter: board private structure
312  */
313 static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
314 {
315         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
316         struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
317         struct atl1_buffer *buffer_info;
318         struct pci_dev *pdev = adapter->pdev;
319         unsigned long size;
320         unsigned int i;
321
322         /* Free all the Rx ring sk_buffs */
323         for (i = 0; i < rfd_ring->count; i++) {
324                 buffer_info = &rfd_ring->buffer_info[i];
325                 if (buffer_info->dma) {
326                         pci_unmap_page(pdev, buffer_info->dma,
327                                 buffer_info->length, PCI_DMA_FROMDEVICE);
328                         buffer_info->dma = 0;
329                 }
330                 if (buffer_info->skb) {
331                         dev_kfree_skb(buffer_info->skb);
332                         buffer_info->skb = NULL;
333                 }
334         }
335
336         size = sizeof(struct atl1_buffer) * rfd_ring->count;
337         memset(rfd_ring->buffer_info, 0, size);
338
339         /* Zero out the descriptor ring */
340         memset(rfd_ring->desc, 0, rfd_ring->size);
341
342         rfd_ring->next_to_clean = 0;
343         atomic_set(&rfd_ring->next_to_use, 0);
344
345         rrd_ring->next_to_use = 0;
346         atomic_set(&rrd_ring->next_to_clean, 0);
347 }
348
349 /*
350  * atl1_clean_tx_ring - Free Tx Buffers
351  * @adapter: board private structure
352  */
353 static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
354 {
355         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
356         struct atl1_buffer *buffer_info;
357         struct pci_dev *pdev = adapter->pdev;
358         unsigned long size;
359         unsigned int i;
360
361         /* Free all the Tx ring sk_buffs */
362         for (i = 0; i < tpd_ring->count; i++) {
363                 buffer_info = &tpd_ring->buffer_info[i];
364                 if (buffer_info->dma) {
365                         pci_unmap_page(pdev, buffer_info->dma,
366                                 buffer_info->length, PCI_DMA_TODEVICE);
367                         buffer_info->dma = 0;
368                 }
369         }
370
371         for (i = 0; i < tpd_ring->count; i++) {
372                 buffer_info = &tpd_ring->buffer_info[i];
373                 if (buffer_info->skb) {
374                         dev_kfree_skb_any(buffer_info->skb);
375                         buffer_info->skb = NULL;
376                 }
377         }
378
379         size = sizeof(struct atl1_buffer) * tpd_ring->count;
380         memset(tpd_ring->buffer_info, 0, size);
381
382         /* Zero out the descriptor ring */
383         memset(tpd_ring->desc, 0, tpd_ring->size);
384
385         atomic_set(&tpd_ring->next_to_use, 0);
386         atomic_set(&tpd_ring->next_to_clean, 0);
387 }
388
389 /*
390  * atl1_free_ring_resources - Free Tx / RX descriptor Resources
391  * @adapter: board private structure
392  *
393  * Free all transmit software resources
394  */
395 void atl1_free_ring_resources(struct atl1_adapter *adapter)
396 {
397         struct pci_dev *pdev = adapter->pdev;
398         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
399         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
400         struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
401         struct atl1_ring_header *ring_header = &adapter->ring_header;
402
403         atl1_clean_tx_ring(adapter);
404         atl1_clean_rx_ring(adapter);
405
406         kfree(tpd_ring->buffer_info);
407         pci_free_consistent(pdev, ring_header->size, ring_header->desc,
408                 ring_header->dma);
409
410         tpd_ring->buffer_info = NULL;
411         tpd_ring->desc = NULL;
412         tpd_ring->dma = 0;
413
414         rfd_ring->buffer_info = NULL;
415         rfd_ring->desc = NULL;
416         rfd_ring->dma = 0;
417
418         rrd_ring->desc = NULL;
419         rrd_ring->dma = 0;
420 }
421
422 static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
423 {
424         u32 value;
425         struct atl1_hw *hw = &adapter->hw;
426         struct net_device *netdev = adapter->netdev;
427         /* Config MAC CTRL Register */
428         value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
429         /* duplex */
430         if (FULL_DUPLEX == adapter->link_duplex)
431                 value |= MAC_CTRL_DUPLX;
432         /* speed */
433         value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
434                          MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
435                   MAC_CTRL_SPEED_SHIFT);
436         /* flow control */
437         value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
438         /* PAD & CRC */
439         value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
440         /* preamble length */
441         value |= (((u32) adapter->hw.preamble_len
442                    & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
443         /* vlan */
444         if (adapter->vlgrp)
445                 value |= MAC_CTRL_RMV_VLAN;
446         /* rx checksum
447            if (adapter->rx_csum)
448            value |= MAC_CTRL_RX_CHKSUM_EN;
449          */
450         /* filter mode */
451         value |= MAC_CTRL_BC_EN;
452         if (netdev->flags & IFF_PROMISC)
453                 value |= MAC_CTRL_PROMIS_EN;
454         else if (netdev->flags & IFF_ALLMULTI)
455                 value |= MAC_CTRL_MC_ALL_EN;
456         /* value |= MAC_CTRL_LOOPBACK; */
457         iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
458 }
459
/*
 * atl1_check_link - poll the PHY and reconcile link state
 * @adapter: board private structure
 *
 * Reads the PHY link status.  On link-down, marks the carrier off.  On
 * link-up, compares the negotiated speed/duplex against the configured
 * media type: if they match, updates the MAC and carrier state; if not,
 * forces the PHY to the configured fixed mode, or (for autoneg/1000M)
 * arms a timer to re-configure the PHY later.
 *
 * Returns 0, or the error from atl1_get_speed_and_duplex() if the
 * negotiated parameters cannot be read.
 */
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must read twice: the link-status bit is latched, so
	 * the first read can return a stale value */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	/* Decide whether the negotiated result conflicts with a fixed
	 * media-type setting; AUTO_SENSOR falls through with reconfig=0. */
	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		/* Accept the negotiated link: reprogram the MAC only if
		 * speed or duplex actually changed. */
		if (adapter->link_speed != speed
		    || adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			dev_info(&adapter->pdev->dev,
				"%s link is up %d Mbps %s\n",
				netdev->name, adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return 0;
	}

	/* Negotiated result conflicts with the configured media type:
	 * change original link status to down before re-forcing the PHY */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	/* For a fixed (non-autoneg, non-1000M) media type, force the PHY
	 * to the requested speed/duplex directly via BMCR. */
	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				   MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
	}

	return 0;
}
570
571 /*
572  * atl1_change_mtu - Change the Maximum Transfer Unit
573  * @netdev: network interface device structure
574  * @new_mtu: new value for maximum frame size
575  *
576  * Returns 0 on success, negative on failure
577  */
578 static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
579 {
580         struct atl1_adapter *adapter = netdev_priv(netdev);
581         int old_mtu = netdev->mtu;
582         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
583
584         if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
585             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
586                 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
587                 return -EINVAL;
588         }
589
590         adapter->hw.max_frame_size = max_frame;
591         adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
592         adapter->rx_buffer_len = (max_frame + 7) & ~7;
593         adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
594
595         netdev->mtu = new_mtu;
596         if ((old_mtu != new_mtu) && netif_running(netdev)) {
597                 atl1_down(adapter);
598                 atl1_up(adapter);
599         }
600
601         return 0;
602 }
603
604 static void set_flow_ctrl_old(struct atl1_adapter *adapter)
605 {
606         u32 hi, lo, value;
607
608         /* RFD Flow Control */
609         value = adapter->rfd_ring.count;
610         hi = value / 16;
611         if (hi < 2)
612                 hi = 2;
613         lo = value * 7 / 8;
614
615         value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
616                 ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
617         iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
618
619         /* RRD Flow Control */
620         value = adapter->rrd_ring.count;
621         lo = value / 16;
622         hi = value * 7 / 8;
623         if (lo < 2)
624                 lo = 2;
625         value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
626                 ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
627         iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
628 }
629
630 static void set_flow_ctrl_new(struct atl1_hw *hw)
631 {
632         u32 hi, lo, value;
633
634         /* RXF Flow Control */
635         value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
636         lo = value / 16;
637         if (lo < 192)
638                 lo = 192;
639         hi = value * 7 / 8;
640         if (hi < lo)
641                 hi = lo + 16;
642         value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
643                 ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
644         iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
645
646         /* RRD Flow Control */
647         value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
648         lo = value / 8;
649         hi = value * 7 / 8;
650         if (lo < 2)
651                 lo = 2;
652         if (hi < lo)
653                 hi = lo + 3;
654         value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
655                 ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
656         iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
657 }
658
659 /*
660  * atl1_configure - Configure Transmit&Receive Unit after Reset
661  * @adapter: board private structure
662  *
663  * Configure the Tx /Rx unit of the MAC after a reset.
664  */
/*
 * Programs every MAC/DMA/queue register from the values prepared in
 * atl1_sw_init() and the ring addresses from atl1_setup_ring_resources().
 * Returns 0 on success, 1 if the ISR reports a PHY link-down condition
 * after configuration (treated as a config failure by the caller).
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	u32 value;

	/* clear interrupt status */
	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

	/* set MAC Address: bytes 2-5 in the low register, 0-1 in the
	 * high register */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) |
		(((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

	/* tx / rx ring */

	/* HI base address — shared by all rings, which live in one
	 * coherent allocation (see atl1_setup_ring_resources) */
	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
	/* LO base address */
	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_SMB_ADDR_LO);

	/* element count: RRD count in the upper half, RFD in the lower */
	value = adapter->rrd_ring.count;
	value <<= 16;
	value += adapter->rfd_ring.count;
	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
		REG_DESC_TPD_RING_SIZE);

	/* Load Ptr — tells hardware to latch the ring setup */
	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

	/* config Mailbox: initial producer/consumer indices */
	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
		  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
		((atomic_read(&adapter->rrd_ring.next_to_clean)
		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		((atomic_read(&adapter->rfd_ring.next_to_use)
		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAILBOX);

	/* config IPG/IFG: inter-packet / inter-frame gap timing */
	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
		 << MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
		<< MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
		<< MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
		<< MAC_IPG_IFG_IPGR2_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

	/* config  Half-Duplex Control: collision window, retry limit,
	 * jam IPG; 0xa is a fixed ABEBT value (vendor default) */
	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

	/* set Interrupt Moderator Timer */
	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

	/* set Interrupt Clear Timer */
	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

	/* set max frame size hw will accept */
	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

	/* jumbo size & rrd retirement timer */
	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
		 << RXQ_JMBOSZ_TH_SHIFT) |
		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
		<< RXQ_JMBO_LKAH_SHIFT) |
		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
		<< RXQ_RRD_TIMER_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

	/* Flow Control: older revisions derive pause thresholds from
	 * ring sizes, newer ones from the SRAM fifo lengths */
	switch (hw->dev_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		set_flow_ctrl_old(adapter);
		break;
	default:
		set_flow_ctrl_new(hw);
		break;
	}

	/* config TXQ: burst sizes, fetch threshold, enhanced mode, enable */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
		<< TX_JUMBO_TASK_TH_SHIFT) |
		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ: burst sizes, prefetch gap, cut-through, enable */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine: read/write burst lengths, ordering mode */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

	/* config CMB / SMB: write thresholds and timers */
	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
		hw->cmb_tpd : adapter->tpd_ring.count;
	value <<= 16;
	value |= hw->cmb_rrd;
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

	/* --- enable CMB / SMB */
	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

	/* A PHY link-down interrupt at this point means configuration
	 * failed; report 1 so the caller can retry/abort. */
	value = ioread32(adapter->hw.hw_addr + REG_ISR);
	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
		value = 1;	/* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
	return value;
}
835
836 /*
837  * atl1_pcie_patch - Patch for PCIE module
838  */
839 static void atl1_pcie_patch(struct atl1_adapter *adapter)
840 {
841         u32 value;
842
843         /* much vendor magic here */
844         value = 0x6500;
845         iowrite32(value, adapter->hw.hw_addr + 0x12FC);
846         /* pcie flow control mode change */
847         value = ioread32(adapter->hw.hw_addr + 0x1008);
848         value |= 0x8000;
849         iowrite32(value, adapter->hw.hw_addr + 0x1008);
850 }
851
852 /*
853  * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
854  * on PCI Command register is disable.
855  * The function enable this bit.
856  * Brackett, 2006/03/15
857  */
858 static void atl1_via_workaround(struct atl1_adapter *adapter)
859 {
860         unsigned long value;
861
862         value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
863         if (value & PCI_COMMAND_INTX_DISABLE)
864                 value &= ~PCI_COMMAND_INTX_DISABLE;
865         iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
866 }
867
/*
 * atl1_inc_smb - accumulate the hardware Statistics Message Block
 * @adapter: board private structure
 *
 * Folds the counters the NIC DMAed into the SMB into the driver's
 * soft_stats accumulators (+=), then mirrors the accumulated totals
 * into net_stats for the network core.
 */
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
	struct stats_msg_block *smb = adapter->smb.smb;

	/* Fill out the OS statistics structure */
	adapter->soft_stats.rx_packets += smb->rx_ok;
	adapter->soft_stats.tx_packets += smb->tx_ok;
	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
	adapter->soft_stats.multicast += smb->rx_mcast;
	/* assumes each aborted tx experienced max_retry collisions
	 * before giving up — TODO confirm against hardware docs */
	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

	/* Rx Errors */
	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
		smb->rx_rrd_ov + smb->rx_align_err);
	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
	/* frames lost to RRD/RXF FIFO overflow count as "missed" */
	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
		smb->rx_rxf_ov);

	adapter->soft_stats.rx_pause += smb->rx_pause;
	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

	/* Tx Errors */
	adapter->soft_stats.tx_errors += (smb->tx_late_col +
		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
	adapter->soft_stats.tx_window_errors += smb->tx_late_col;

	adapter->soft_stats.excecol += smb->tx_abort_col;
	adapter->soft_stats.deffer += smb->tx_defer;
	adapter->soft_stats.scc += smb->tx_1_col;
	adapter->soft_stats.mcc += smb->tx_2_col;
	adapter->soft_stats.latecol += smb->tx_late_col;
	adapter->soft_stats.tx_underun += smb->tx_underrun;
	adapter->soft_stats.tx_trunc += smb->tx_trunc;
	adapter->soft_stats.tx_pause += smb->tx_pause;

	/* copy accumulated totals into the structure read by the stack */
	adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
	adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
	adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
	adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
	adapter->net_stats.multicast = adapter->soft_stats.multicast;
	adapter->net_stats.collisions = adapter->soft_stats.collisions;
	adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
	adapter->net_stats.rx_over_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.rx_length_errors =
		adapter->soft_stats.rx_length_errors;
	adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
	adapter->net_stats.rx_frame_errors =
		adapter->soft_stats.rx_frame_errors;
	adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
	adapter->net_stats.rx_missed_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
	adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
	adapter->net_stats.tx_aborted_errors =
		adapter->soft_stats.tx_aborted_errors;
	adapter->net_stats.tx_window_errors =
		adapter->soft_stats.tx_window_errors;
	adapter->net_stats.tx_carrier_errors =
		adapter->soft_stats.tx_carrier_errors;
}
938
939 static void atl1_update_mailbox(struct atl1_adapter *adapter)
940 {
941         unsigned long flags;
942         u32 tpd_next_to_use;
943         u32 rfd_next_to_use;
944         u32 rrd_next_to_clean;
945         u32 value;
946
947         spin_lock_irqsave(&adapter->mb_lock, flags);
948
949         tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
950         rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
951         rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
952
953         value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
954                 MB_RFD_PROD_INDX_SHIFT) |
955                 ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
956                 MB_RRD_CONS_INDX_SHIFT) |
957                 ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
958                 MB_TPD_PROD_INDX_SHIFT);
959         iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
960
961         spin_unlock_irqrestore(&adapter->mb_lock, flags);
962 }
963
964 static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
965         struct rx_return_desc *rrd, u16 offset)
966 {
967         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
968
969         while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
970                 rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
971                 if (++rfd_ring->next_to_clean == rfd_ring->count) {
972                         rfd_ring->next_to_clean = 0;
973                 }
974         }
975 }
976
977 static void atl1_update_rfd_index(struct atl1_adapter *adapter,
978         struct rx_return_desc *rrd)
979 {
980         u16 num_buf;
981
982         num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
983                 adapter->rx_buffer_len;
984         if (rrd->num_buf == num_buf)
985                 /* clean alloc flag for bad rrd */
986                 atl1_clean_alloc_flag(adapter, rrd, num_buf);
987 }
988
989 static void atl1_rx_checksum(struct atl1_adapter *adapter,
990         struct rx_return_desc *rrd, struct sk_buff *skb)
991 {
992         struct pci_dev *pdev = adapter->pdev;
993
994         skb->ip_summed = CHECKSUM_NONE;
995
996         if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
997                 if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
998                                         ERR_FLAG_CODE | ERR_FLAG_OV)) {
999                         adapter->hw_csum_err++;
1000                         dev_printk(KERN_DEBUG, &pdev->dev,
1001                                 "rx checksum error\n");
1002                         return;
1003                 }
1004         }
1005
1006         /* not IPv4 */
1007         if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
1008                 /* checksum is invalid, but it's not an IPv4 pkt, so ok */
1009                 return;
1010
1011         /* IPv4 packet */
1012         if (likely(!(rrd->err_flg &
1013                 (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
1014                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1015                 adapter->hw_csum_good++;
1016                 return;
1017         }
1018
1019         /* IPv4, but hardware thinks its checksum is wrong */
1020         dev_printk(KERN_DEBUG, &pdev->dev,
1021                 "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
1022                 rrd->pkt_flg, rrd->err_flg);
1023         skb->ip_summed = CHECKSUM_COMPLETE;
1024         skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
1025         adapter->hw_csum_err++;
1026         return;
1027 }
1028
1029 /*
1030  * atl1_alloc_rx_buffers - Replace used receive buffers
1031  * @adapter: address of board private structure
1032  */
1033 static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
1034 {
1035         struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1036         struct pci_dev *pdev = adapter->pdev;
1037         struct page *page;
1038         unsigned long offset;
1039         struct atl1_buffer *buffer_info, *next_info;
1040         struct sk_buff *skb;
1041         u16 num_alloc = 0;
1042         u16 rfd_next_to_use, next_next;
1043         struct rx_free_desc *rfd_desc;
1044
1045         next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
1046         if (++next_next == rfd_ring->count)
1047                 next_next = 0;
1048         buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1049         next_info = &rfd_ring->buffer_info[next_next];
1050
1051         while (!buffer_info->alloced && !next_info->alloced) {
1052                 if (buffer_info->skb) {
1053                         buffer_info->alloced = 1;
1054                         goto next;
1055                 }
1056
1057                 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
1058
1059                 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
1060                 if (unlikely(!skb)) {
1061                         /* Better luck next round */
1062                         adapter->net_stats.rx_dropped++;
1063                         break;
1064                 }
1065
1066                 /*
1067                  * Make buffer alignment 2 beyond a 16 byte boundary
1068                  * this will result in a 16 byte aligned IP header after
1069                  * the 14 byte MAC header is removed
1070                  */
1071                 skb_reserve(skb, NET_IP_ALIGN);
1072
1073                 buffer_info->alloced = 1;
1074                 buffer_info->skb = skb;
1075                 buffer_info->length = (u16) adapter->rx_buffer_len;
1076                 page = virt_to_page(skb->data);
1077                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1078                 buffer_info->dma = pci_map_page(pdev, page, offset,
1079                                                 adapter->rx_buffer_len,
1080                                                 PCI_DMA_FROMDEVICE);
1081                 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1082                 rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
1083                 rfd_desc->coalese = 0;
1084
1085 next:
1086                 rfd_next_to_use = next_next;
1087                 if (unlikely(++next_next == rfd_ring->count))
1088                         next_next = 0;
1089
1090                 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1091                 next_info = &rfd_ring->buffer_info[next_next];
1092                 num_alloc++;
1093         }
1094
1095         if (num_alloc) {
1096                 /*
1097                  * Force memory writes to complete before letting h/w
1098                  * know there are new descriptors to fetch.  (Only
1099                  * applicable for weak-ordered memory model archs,
1100                  * such as IA-64).
1101                  */
1102                 wmb();
1103                 atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
1104         }
1105         return num_alloc;
1106 }
1107
/*
 * atl1_intr_rx - receive interrupt service routine
 * @adapter: board private structure
 *
 * Drains valid RX Return Descriptors (RRDs): for each completed
 * packet, unmaps its buffer, hands the skb to the stack (with VLAN
 * acceleration when applicable), recycles the RFD slot, refills the
 * RFD ring and finally publishes the updated indices via the mailbox.
 */
static void atl1_intr_rx(struct atl1_adapter *adapter)
{
	int i, count;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	count = 0;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (1) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		i = 1;	/* one retry allowed for an in-flight RRD DMA */
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;

			/* rrd seems to be bad */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely */
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"incomplete RRD DMA transfer\n");
				udelay(1);
				goto chk_rrd;
			}
			/* bad rrd */
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"bad RRD\n");
			/* see if update RFD index */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd: mark consumed and advance */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {	/* current rrd still not be updated */

			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't need upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			       buffer_info->length, PCI_DMA_FROMDEVICE);
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		/* strip the trailing Ethernet CRC */
		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
			/* reassemble the VLAN tag from the descriptor's
			 * scrambled bit layout — hardware-specific */
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
					((rrd->vlan_tag & 7) << 13) |
					((rrd->vlan_tag & 8) << 9);
			vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
		} else
			netif_rx(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;

		adapter->netdev->last_rx = jiffies;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update mailbox ? */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;

		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
		    atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
		    atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}
1236
1237 static void atl1_intr_tx(struct atl1_adapter *adapter)
1238 {
1239         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1240         struct atl1_buffer *buffer_info;
1241         u16 sw_tpd_next_to_clean;
1242         u16 cmb_tpd_next_to_clean;
1243
1244         sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1245         cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
1246
1247         while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
1248                 struct tx_packet_desc *tpd;
1249
1250                 tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
1251                 buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
1252                 if (buffer_info->dma) {
1253                         pci_unmap_page(adapter->pdev, buffer_info->dma,
1254                                        buffer_info->length, PCI_DMA_TODEVICE);
1255                         buffer_info->dma = 0;
1256                 }
1257
1258                 if (buffer_info->skb) {
1259                         dev_kfree_skb_irq(buffer_info->skb);
1260                         buffer_info->skb = NULL;
1261                 }
1262
1263                 if (++sw_tpd_next_to_clean == tpd_ring->count)
1264                         sw_tpd_next_to_clean = 0;
1265         }
1266         atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
1267
1268         if (netif_queue_stopped(adapter->netdev)
1269             && netif_carrier_ok(adapter->netdev))
1270                 netif_wake_queue(adapter->netdev);
1271 }
1272
1273 static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
1274 {
1275         u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1276         u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
1277         return ((next_to_clean > next_to_use) ?
1278                 next_to_clean - next_to_use - 1 :
1279                 tpd_ring->count + next_to_clean - next_to_use - 1);
1280 }
1281
1282 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
1283         struct tx_packet_desc *ptpd)
1284 {
1285         /* spinlock held */
1286         u8 hdr_len, ip_off;
1287         u32 real_len;
1288         int err;
1289
1290         if (skb_shinfo(skb)->gso_size) {
1291                 if (skb_header_cloned(skb)) {
1292                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1293                         if (unlikely(err))
1294                                 return -1;
1295                 }
1296
1297                 if (skb->protocol == ntohs(ETH_P_IP)) {
1298                         struct iphdr *iph = ip_hdr(skb);
1299
1300                         real_len = (((unsigned char *)iph - skb->data) +
1301                                 ntohs(iph->tot_len));
1302                         if (real_len < skb->len)
1303                                 pskb_trim(skb, real_len);
1304                         hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1305                         if (skb->len == hdr_len) {
1306                                 iph->check = 0;
1307                                 tcp_hdr(skb)->check =
1308                                         ~csum_tcpudp_magic(iph->saddr,
1309                                         iph->daddr, tcp_hdrlen(skb),
1310                                         IPPROTO_TCP, 0);
1311                                 ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
1312                                         TPD_IPHL_SHIFT;
1313                                 ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
1314                                         TPD_TCPHDRLEN_MASK) <<
1315                                         TPD_TCPHDRLEN_SHIFT;
1316                                 ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
1317                                 ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
1318                                 return 1;
1319                         }
1320
1321                         iph->check = 0;
1322                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1323                                         iph->daddr, 0, IPPROTO_TCP, 0);
1324                         ip_off = (unsigned char *)iph -
1325                                 (unsigned char *) skb_network_header(skb);
1326                         if (ip_off == 8) /* 802.3-SNAP frame */
1327                                 ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
1328                         else if (ip_off != 0)
1329                                 return -2;
1330
1331                         ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
1332                                 TPD_IPHL_SHIFT;
1333                         ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
1334                                 TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
1335                         ptpd->word3 |= (skb_shinfo(skb)->gso_size &
1336                                 TPD_MSS_MASK) << TPD_MSS_SHIFT;
1337                         ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
1338                         return 3;
1339                 }
1340         }
1341         return false;
1342 }
1343
1344 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
1345         struct tx_packet_desc *ptpd)
1346 {
1347         u8 css, cso;
1348
1349         if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1350                 cso = skb_transport_offset(skb);
1351                 css = cso + skb->csum_offset;
1352                 if (unlikely(cso & 0x1)) {
1353                         dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1354                                 "payload offset not an even number\n");
1355                         return -1;
1356                 }
1357                 ptpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
1358                         TPD_PLOADOFFSET_SHIFT;
1359                 ptpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
1360                         TPD_CCSUMOFFSET_SHIFT;
1361                 ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
1362                 return true;
1363         }
1364         return 0;
1365 }
1366
/*
 * atl1_tx_map - DMA-map an skb across one or more tx buffers
 * @adapter: board private structure
 * @skb: packet to be transmitted
 * @ptpd: prototype descriptor (TSO flag read from word3)
 *
 * Caller holds the tx spinlock.  For TSO packets the headers are
 * mapped in their own segment, then the linear payload and each page
 * fragment are split into ATL1_MAX_TX_BUF_LEN-sized segments.  The
 * skb pointer is stored only in the LAST segment's buffer_info so
 * completion frees it exactly once.
 */
static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	/* spinlock held */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 buf_len = skb->len;
	struct page *page;
	unsigned long offset;
	unsigned int nr_frags;
	unsigned int f;
	int retval;
	u16 next_to_use;
	u16 data_len;
	u8 hdr_len;

	/* buf_len = length of the linear (non-paged) data */
	buf_len -= skb->data_len;
	nr_frags = skb_shinfo(skb)->nr_frags;
	next_to_use = atomic_read(&tpd_ring->next_to_use);
	buffer_info = &tpd_ring->buffer_info[next_to_use];
	if (unlikely(buffer_info->skb))
		BUG();
	/* put skb in last TPD */
	buffer_info->skb = NULL;

	/* nonzero segment-enable bit in word3 means this is a TSO packet */
	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
	if (retval) {
		/* TSO: map the protocol headers as their own segment */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		buffer_info->length = hdr_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
						offset, hdr_len,
						PCI_DMA_TODEVICE);

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;

		if (buf_len > hdr_len) {
			int i, nseg;

			/* map the linear payload after the headers, split
			 * into max-sized tx segments */
			data_len = buf_len - hdr_len;
			nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
			for (i = 0; i < nseg; i++) {
				buffer_info =
				    &tpd_ring->buffer_info[next_to_use];
				buffer_info->skb = NULL;
				buffer_info->length =
				    (ATL1_MAX_TX_BUF_LEN >=
				     data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
				data_len -= buffer_info->length;
				page = virt_to_page(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
				offset = (unsigned long)(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
					~PAGE_MASK;
				buffer_info->dma = pci_map_page(adapter->pdev,
					page, offset, buffer_info->length,
					PCI_DMA_TODEVICE);
				if (++next_to_use == tpd_ring->count)
					next_to_use = 0;
			}
		}
	} else {
		/* not TSO: map the whole linear data in one segment */
		buffer_info->length = buf_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, buf_len, PCI_DMA_TODEVICE);
		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}

	/* map each page fragment, split into max-sized tx segments */
	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;
		u16 i, nseg;

		frag = &skb_shinfo(skb)->frags[f];
		buf_len = frag->size;

		nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
			ATL1_MAX_TX_BUF_LEN;
		for (i = 0; i < nseg; i++) {
			buffer_info = &tpd_ring->buffer_info[next_to_use];
			if (unlikely(buffer_info->skb))
				BUG();
			buffer_info->skb = NULL;
			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
				ATL1_MAX_TX_BUF_LEN : buf_len;
			buf_len -= buffer_info->length;
			buffer_info->dma = pci_map_page(adapter->pdev,
				frag->page,
				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
				buffer_info->length, PCI_DMA_TODEVICE);

			if (++next_to_use == tpd_ring->count)
				next_to_use = 0;
		}
	}

	/* last tpd's buffer-info */
	buffer_info->skb = skb;
}
1473
/*
 * atl1_tx_queue - write @count TPDs into the tx ring from a prototype
 * @adapter: board private structure
 * @count: number of descriptors (segments) for this packet
 * @ptpd: prototype descriptor already filled by tso/csum setup
 *
 * Caller holds the tx spinlock.  Copies the prototype into each slot,
 * fills in the per-segment DMA address/length, sets the header flag on
 * the first TSO segment and EOP on the last, then advances next_to_use.
 */
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
       struct tx_packet_desc *ptpd)
{
	/* spinlock held */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 j;
	u32 val;
	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
		if (tpd != ptpd)
			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		/* NOTE(review): masking AFTER cpu_to_le16 only works if
		 * TPD_BUFLEN_MASK covers the full 16-bit field on both
		 * endiannesses — TODO confirm against descriptor layout */
		tpd->word2 = (cpu_to_le16(buffer_info->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;

		/*
		 * if this is the first packet in a TSO chain, set
		 * TPD_HDRFLAG, otherwise, clear it.
		 */
		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
			TPD_SEGMENT_EN_MASK;
		if (val) {
			if (!j)
				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
			else
				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
		}

		if (j == (count - 1))
			tpd->word3 |= 1 << TPD_EOP_SHIFT;

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, next_to_use);
}
1523
1524 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1525 {
1526         struct atl1_adapter *adapter = netdev_priv(netdev);
1527         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1528         int len = skb->len;
1529         int tso;
1530         int count = 1;
1531         int ret_val;
1532         struct tx_packet_desc *ptpd;
1533         u16 frag_size;
1534         u16 vlan_tag;
1535         unsigned long flags;
1536         unsigned int nr_frags = 0;
1537         unsigned int mss = 0;
1538         unsigned int f;
1539         unsigned int proto_hdr_len;
1540
1541         len -= skb->data_len;
1542
1543         if (unlikely(skb->len <= 0)) {
1544                 dev_kfree_skb_any(skb);
1545                 return NETDEV_TX_OK;
1546         }
1547
1548         nr_frags = skb_shinfo(skb)->nr_frags;
1549         for (f = 0; f < nr_frags; f++) {
1550                 frag_size = skb_shinfo(skb)->frags[f].size;
1551                 if (frag_size)
1552                         count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
1553                                 ATL1_MAX_TX_BUF_LEN;
1554         }
1555
1556         mss = skb_shinfo(skb)->gso_size;
1557         if (mss) {
1558                 if (skb->protocol == ntohs(ETH_P_IP)) {
1559                         proto_hdr_len = (skb_transport_offset(skb) +
1560                                          tcp_hdrlen(skb));
1561                         if (unlikely(proto_hdr_len > len)) {
1562                                 dev_kfree_skb_any(skb);
1563                                 return NETDEV_TX_OK;
1564                         }
1565                         /* need additional TPD ? */
1566                         if (proto_hdr_len != len)
1567                                 count += (len - proto_hdr_len +
1568                                         ATL1_MAX_TX_BUF_LEN - 1) /
1569                                         ATL1_MAX_TX_BUF_LEN;
1570                 }
1571         }
1572
1573         if (!spin_trylock_irqsave(&adapter->lock, flags)) {
1574                 /* Can't get lock - tell upper layer to requeue */
1575                 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
1576                 return NETDEV_TX_LOCKED;
1577         }
1578
1579         if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
1580                 /* not enough descriptors */
1581                 netif_stop_queue(netdev);
1582                 spin_unlock_irqrestore(&adapter->lock, flags);
1583                 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
1584                 return NETDEV_TX_BUSY;
1585         }
1586
1587         ptpd = ATL1_TPD_DESC(tpd_ring,
1588                 (u16) atomic_read(&tpd_ring->next_to_use));
1589         memset(ptpd, 0, sizeof(struct tx_packet_desc));
1590
1591         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1592                 vlan_tag = vlan_tx_tag_get(skb);
1593                 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
1594                         ((vlan_tag >> 9) & 0x8);
1595                 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
1596                 ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
1597                         TPD_VL_TAGGED_SHIFT;
1598         }
1599
1600         tso = atl1_tso(adapter, skb, ptpd);
1601         if (tso < 0) {
1602                 spin_unlock_irqrestore(&adapter->lock, flags);
1603                 dev_kfree_skb_any(skb);
1604                 return NETDEV_TX_OK;
1605         }
1606
1607         if (!tso) {
1608                 ret_val = atl1_tx_csum(adapter, skb, ptpd);
1609                 if (ret_val < 0) {
1610                         spin_unlock_irqrestore(&adapter->lock, flags);
1611                         dev_kfree_skb_any(skb);
1612                         return NETDEV_TX_OK;
1613                 }
1614         }
1615
1616         atl1_tx_map(adapter, skb, ptpd);
1617         atl1_tx_queue(adapter, count, ptpd);
1618         atl1_update_mailbox(adapter);
1619         spin_unlock_irqrestore(&adapter->lock, flags);
1620         netdev->trans_start = jiffies;
1621         return NETDEV_TX_OK;
1622 }
1623
/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
        struct atl1_adapter *adapter = netdev_priv(data);
        u32 status;
        u8 update_rx;
        /* bound the re-check loop so a storming device cannot wedge the CPU */
        int max_ints = 10;

        /* interrupt causes are reported through the coalescing message block */
        status = adapter->cmb.cmb->int_stats;
        if (!status)
                return IRQ_NONE;

        /* NOTE(review): update_rx is set but never used in this handler */
        update_rx = 0;

        do {
                /* clear CMB interrupt status at once */
                adapter->cmb.cmb->int_stats = 0;

                if (status & ISR_GPHY)  /* clear phy status */
                        atlx_clear_phy_int(adapter);

                /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
                iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

                /* check if SMB intr */
                if (status & ISR_SMB)
                        atl1_inc_smb(adapter);

                /* check if PCIE PHY Link down */
                if (status & ISR_PHY_LINKDOWN) {
                        dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                "pcie phy link down %x\n", status);
                        if (netif_running(adapter->netdev)) {   /* reset MAC */
                                /* mask all interrupts, defer reset to a task */
                                iowrite32(0, adapter->hw.hw_addr + REG_IMR);
                                schedule_work(&adapter->pcie_dma_to_rst_task);
                                return IRQ_HANDLED;
                        }
                }

                /* check if DMA read/write error ? */
                if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
                        dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                "pcie DMA r/w error (status = 0x%x)\n",
                                status);
                        iowrite32(0, adapter->hw.hw_addr + REG_IMR);
                        schedule_work(&adapter->pcie_dma_to_rst_task);
                        return IRQ_HANDLED;
                }

                /* link event */
                if (status & ISR_GPHY) {
                        adapter->soft_stats.tx_carrier_errors++;
                        atl1_check_for_link(adapter);
                }

                /* transmit event */
                if (status & ISR_CMB_TX)
                        atl1_intr_tx(adapter);

                /* rx exception */
                if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
                        ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
                        ISR_HOST_RRD_OV | ISR_CMB_RX))) {
                        if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
                                ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
                                ISR_HOST_RRD_OV))
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "rx exception, ISR = 0x%x\n", status);
                        atl1_intr_rx(adapter);
                }

                if (--max_ints < 0)
                        break;

        } while ((status = adapter->cmb.cmb->int_stats));

        /* re-enable Interrupt */
        iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
        return IRQ_HANDLED;
}
1709
1710 /*
1711  * atl1_watchdog - Timer Call-back
1712  * @data: pointer to netdev cast into an unsigned long
1713  */
1714 static void atl1_watchdog(unsigned long data)
1715 {
1716         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1717
1718         /* Reset the timer */
1719         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1720 }
1721
1722 /*
1723  * atl1_phy_config - Timer Call-back
1724  * @data: pointer to netdev cast into an unsigned long
1725  */
1726 static void atl1_phy_config(unsigned long data)
1727 {
1728         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1729         struct atl1_hw *hw = &adapter->hw;
1730         unsigned long flags;
1731
1732         spin_lock_irqsave(&adapter->lock, flags);
1733         adapter->phy_timer_pending = false;
1734         atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1735         atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
1736         atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
1737         spin_unlock_irqrestore(&adapter->lock, flags);
1738 }
1739
1740 /*
1741  * Orphaned vendor comment left intact here:
1742  * <vendor comment>
1743  * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
1744  * will assert. We do soft reset <0x1400=1> according
1745  * with the SPEC. BUT, it seemes that PCIE or DMA
1746  * state-machine will not be reset. DMAR_TO_INT will
1747  * assert again and again.
1748  * </vendor comment>
1749  */
1750 static void atl1_tx_timeout_task(struct work_struct *work)
1751 {
1752         struct atl1_adapter *adapter =
1753                 container_of(work, struct atl1_adapter, tx_timeout_task);
1754         struct net_device *netdev = adapter->netdev;
1755
1756         netif_device_detach(netdev);
1757         atl1_down(adapter);
1758         atl1_up(adapter);
1759         netif_device_attach(netdev);
1760 }
1761
1762 int atl1_reset(struct atl1_adapter *adapter)
1763 {
1764         int ret;
1765         ret = atl1_reset_hw(&adapter->hw);
1766         if (ret)
1767                 return ret;
1768         return atl1_init_hw(&adapter->hw);
1769 }
1770
/*
 * atl1_up - bring the interface up after a hardware reset
 * @adapter: board private structure
 *
 * Reloads multicast/VLAN state, refills the RX ring, configures the MAC,
 * requests the interrupt (MSI when available, falling back to a shared
 * legacy line) and starts the watchdog.  Returns 0 on success or a
 * negative errno; on failure the RX buffers are freed again.
 */
s32 atl1_up(struct atl1_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;
        int irq_flags = IRQF_SAMPLE_RANDOM;

        /* hardware has been reset, we need to reload some things */
        atlx_set_multi(netdev);
        atl1_init_ring_ptrs(adapter);
        atlx_restore_vlan(adapter);
        /* here err is the number of buffers allocated, not an errno */
        err = atl1_alloc_rx_buffers(adapter);
        if (unlikely(!err))
                /* no RX BUFFER allocated */
                return -ENOMEM;

        if (unlikely(atl1_configure(adapter))) {
                err = -EIO;
                goto err_up;
        }

        err = pci_enable_msi(adapter->pdev);
        if (err) {
                dev_info(&adapter->pdev->dev,
                        "Unable to enable MSI: %d\n", err);
                /* legacy interrupt line may be shared with other devices */
                irq_flags |= IRQF_SHARED;
        }

        err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
                        netdev->name, netdev);
        if (unlikely(err))
                goto err_up;

        mod_timer(&adapter->watchdog_timer, jiffies);
        atlx_irq_enable(adapter);
        atl1_check_link(adapter);
        return 0;

err_up:
        /* harmless if MSI was never enabled */
        pci_disable_msi(adapter->pdev);
        /* free rx_buffers */
        atl1_clean_rx_ring(adapter);
        return err;
}
1814
/*
 * atl1_down - stop the interface
 * @adapter: board private structure
 *
 * Tears down in the reverse order of atl1_up(): stop timers, disable and
 * free the interrupt, reset the MAC, then detach from the stack and drop
 * all TX/RX buffers.
 */
void atl1_down(struct atl1_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_config_timer);
        adapter->phy_timer_pending = false;

        atlx_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);
        pci_disable_msi(adapter->pdev);
        atl1_reset_hw(&adapter->hw);
        /* discard any interrupt causes latched in the coalescing block */
        adapter->cmb.cmb->int_stats = 0;

        adapter->link_speed = SPEED_0;
        adapter->link_duplex = -1;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        atl1_clean_tx_ring(adapter);
        atl1_clean_rx_ring(adapter);
}
1837
1838 /*
1839  * atl1_open - Called when a network interface is made active
1840  * @netdev: network interface device structure
1841  *
1842  * Returns 0 on success, negative value on failure
1843  *
1844  * The open entry point is called when a network interface is made
1845  * active by the system (IFF_UP).  At this point all resources needed
1846  * for transmit and receive operations are allocated, the interrupt
1847  * handler is registered with the OS, the watchdog timer is started,
1848  * and the stack is notified that the interface is ready.
1849  */
static int atl1_open(struct net_device *netdev)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        int err;

        /* allocate TX/RX descriptor ring resources first */
        err = atl1_setup_ring_resources(adapter);
        if (err)
                return err;

        err = atl1_up(adapter);
        if (!err)
                return 0;

        /* bring-up failed: put the hardware back into a known state */
        atl1_reset(adapter);
        return err;
}
1870
1871 /*
1872  * atl1_close - Disables a network interface
1873  * @netdev: network interface device structure
1874  *
1875  * Returns 0, this is not allowed to fail
1876  *
1877  * The close entry point is called when an interface is de-activated
1878  * by the OS.  The hardware is still under the drivers control, but
1879  * needs to be disabled.  A global MAC reset is issued to stop the
1880  * hardware, and all transmit and receive resources are freed.
1881  */
static int atl1_close(struct net_device *netdev)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);

        /* stop the hardware, then release the descriptor ring memory */
        atl1_down(adapter);
        atl1_free_ring_resources(adapter);
        return 0;
}
1889
1890 #ifdef CONFIG_PM
/*
 * atl1_suspend - PCI suspend callback
 * @pdev: PCI device being suspended
 * @state: target power state (unused; device always goes to D3hot)
 *
 * Stops the interface and, when wake-on-LAN is configured, programs the
 * WOL control and MAC receive filters so the device can wake the system;
 * otherwise all wake sources are disabled before powering down.
 */
static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter = netdev_priv(netdev);
        struct atl1_hw *hw = &adapter->hw;
        u32 ctrl = 0;
        u32 wufc = adapter->wol;

        netif_device_detach(netdev);
        if (netif_running(netdev))
                atl1_down(adapter);

        /* read BMSR twice: link status bit is latched-low in MII */
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
        atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
        if (ctrl & BMSR_LSTATUS)
                /* link is up, so waking on link-change makes no sense */
                wufc &= ~ATLX_WUFC_LNKC;

        /* reduce speed to 10/100M */
        if (wufc) {
                atl1_phy_enter_power_saving(hw);
                /* on resume, let the driver set up the link again */
                hw->phy_configured = false;
                atl1_set_mac_addr(hw);
                atlx_set_multi(netdev);

                ctrl = 0;
                /* turn on magic packet wol */
                if (wufc & ATLX_WUFC_MAG)
                        ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;

                /* turn on Link change WOL */
                if (wufc & ATLX_WUFC_LNKC)
                        ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
                iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);

                /* turn on all-multi mode if wake on multicast is enabled */
                ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
                ctrl &= ~MAC_CTRL_DBG;
                ctrl &= ~MAC_CTRL_PROMIS_EN;
                if (wufc & ATLX_WUFC_MC)
                        ctrl |= MAC_CTRL_MC_ALL_EN;
                else
                        ctrl &= ~MAC_CTRL_MC_ALL_EN;

                /* turn on broadcast mode if wake on-BC is enabled */
                if (wufc & ATLX_WUFC_BC)
                        ctrl |= MAC_CTRL_BC_EN;
                else
                        ctrl &= ~MAC_CTRL_BC_EN;

                /* enable RX */
                ctrl |= MAC_CTRL_RX_EN;
                iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
                pci_enable_wake(pdev, PCI_D3hot, 1);
                pci_enable_wake(pdev, PCI_D3cold, 1);
        } else {
                /* no wake sources requested: disable WOL entirely */
                iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);

        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}
1959
1960 static int atl1_resume(struct pci_dev *pdev)
1961 {
1962         struct net_device *netdev = pci_get_drvdata(pdev);
1963         struct atl1_adapter *adapter = netdev_priv(netdev);
1964         u32 err;
1965
1966         pci_set_power_state(pdev, PCI_D0);
1967         pci_restore_state(pdev);
1968
1969         /* FIXME: check and handle */
1970         err = pci_enable_device(pdev);
1971         pci_enable_wake(pdev, PCI_D3hot, 0);
1972         pci_enable_wake(pdev, PCI_D3cold, 0);
1973
1974         iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
1975         atl1_reset(adapter);
1976
1977         if (netif_running(netdev))
1978                 atl1_up(adapter);
1979         netif_device_attach(netdev);
1980
1981         atl1_via_workaround(adapter);
1982
1983         return 0;
1984 }
1985 #else
1986 #define atl1_suspend NULL
1987 #define atl1_resume NULL
1988 #endif
1989
1990 #ifdef CONFIG_NET_POLL_CONTROLLER
1991 static void atl1_poll_controller(struct net_device *netdev)
1992 {
1993         disable_irq(netdev->irq);
1994         atl1_intr(netdev->irq, netdev);
1995         enable_irq(netdev->irq);
1996 }
1997 #endif
1998
1999 /*
2000  * atl1_probe - Device Initialization Routine
2001  * @pdev: PCI device information struct
2002  * @ent: entry in atl1_pci_tbl
2003  *
2004  * Returns 0 on success, negative on failure
2005  *
2006  * atl1_probe initializes an adapter identified by a pci_dev structure.
2007  * The OS initialization, configuring of the adapter private structure,
2008  * and a hardware reset occur.
2009  */
static int __devinit atl1_probe(struct pci_dev *pdev,
        const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct atl1_adapter *adapter;
        /* per-driver board counter; function-static, probe runs serialized */
        static int cards_found = 0;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        /*
         * The atl1 chip can DMA to 64-bit addresses, but it uses a single
         * shared register for the high 32 bits, so only a single, aligned,
         * 4 GB physical address range can be used at a time.
         *
         * Supporting 64-bit DMA on this hardware is more trouble than it's
         * worth.  It is far easier to limit to 32-bit DMA than update
         * various kernel subsystems to support the mechanics required by a
         * fixed-high-32-bit system.
         */
        err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
        if (err) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto err_dma;
        }
        /*
         * Mark all PCI regions associated with PCI device
         * pdev as being reserved by owner atl1_driver_name
         */
        err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
        if (err)
                goto err_request_regions;

        /*
         * Enables bus-mastering on the device and calls
         * pcibios_set_master to do the needed arch specific settings
         */
        pci_set_master(pdev);

        netdev = alloc_etherdev(sizeof(struct atl1_adapter));
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }
        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->hw.back = adapter;

        /* map BAR 0 in its entirety (len 0 == whole region) */
        adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
        if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_pci_iomap;
        }
        /* get device revision number */
        adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
                (REG_MASTER_CTRL + 2));
        dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);

        /* set default ring resource counts */
        adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
        adapter->tpd_ring.count = ATL1_DEFAULT_TPD;

        /* wire up the generic MII access helpers */
        adapter->mii.dev = netdev;
        adapter->mii.mdio_read = mdio_read;
        adapter->mii.mdio_write = mdio_write;
        adapter->mii.phy_id_mask = 0x1f;
        adapter->mii.reg_num_mask = 0x1f;

        /* net_device method table */
        netdev->open = &atl1_open;
        netdev->stop = &atl1_close;
        netdev->hard_start_xmit = &atl1_xmit_frame;
        netdev->get_stats = &atlx_get_stats;
        netdev->set_multicast_list = &atlx_set_multi;
        netdev->set_mac_address = &atl1_set_mac;
        netdev->change_mtu = &atl1_change_mtu;
        netdev->do_ioctl = &atlx_ioctl;
        netdev->tx_timeout = &atlx_tx_timeout;
        netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = atl1_poll_controller;
#endif
        netdev->vlan_rx_register = atlx_vlan_rx_register;

        netdev->ethtool_ops = &atl1_ethtool_ops;
        adapter->bd_number = cards_found;

        /* setup the private structure */
        err = atl1_sw_init(adapter);
        if (err)
                goto err_common;

        netdev->features = NETIF_F_HW_CSUM;
        netdev->features |= NETIF_F_SG;
        netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
        netdev->features |= NETIF_F_TSO;
        /* LLTX: driver does its own TX locking (see atl1_xmit_frame) */
        netdev->features |= NETIF_F_LLTX;

        /*
         * patch for some L1 of old version,
         * the final version of L1 may not need these
         * patches
         */
        /* atl1_pcie_patch(adapter); */

        /* really reset GPHY core */
        iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);

        /*
         * reset the controller to
         * put the device in a known good starting state
         */
        if (atl1_reset_hw(&adapter->hw)) {
                err = -EIO;
                goto err_common;
        }

        /* copy the MAC address out of the EEPROM */
        atl1_read_mac_addr(&adapter->hw);
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                err = -EIO;
                goto err_common;
        }

        atl1_check_options(adapter);

        /* pre-init the MAC, and setup link */
        err = atl1_init_hw(&adapter->hw);
        if (err) {
                err = -EIO;
                goto err_common;
        }

        atl1_pcie_patch(adapter);
        /* assume we have no link for now */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &atl1_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;

        init_timer(&adapter->phy_config_timer);
        adapter->phy_config_timer.function = &atl1_phy_config;
        adapter->phy_config_timer.data = (unsigned long)adapter;
        adapter->phy_timer_pending = false;

        INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);

        INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);

        /* DMA-error recovery reuses the same down/up restart handler */
        INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);

        err = register_netdev(netdev);
        if (err)
                goto err_common;

        cards_found++;
        atl1_via_workaround(adapter);
        return 0;

/* error unwind: labels release resources in reverse acquisition order */
err_common:
        pci_iounmap(pdev, adapter->hw.hw_addr);
err_pci_iomap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_dma:
err_request_regions:
        pci_disable_device(pdev);
        return err;
}
2189
2190 /*
2191  * atl1_remove - Device Removal Routine
2192  * @pdev: PCI device information struct
2193  *
2194  * atl1_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
2196  * Hot-Plug event, or because the driver is going to be removed from
2197  * memory.
2198  */
static void __devexit atl1_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct atl1_adapter *adapter;
        /* Device not available. Return. */
        if (!netdev)
                return;

        adapter = netdev_priv(netdev);

        /*
         * Some atl1 boards lack persistent storage for their MAC, and get it
         * from the BIOS during POST.  If we've been messing with the MAC
         * address, we need to save the permanent one.
         */
        if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
                memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
                        ETH_ALEN);
                atl1_set_mac_addr(&adapter->hw);
        }

        /* power down the GPHY core before unhooking from the stack */
        iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
        unregister_netdev(netdev);
        pci_iounmap(pdev, adapter->hw.hw_addr);
        pci_release_regions(pdev);
        free_netdev(netdev);
        pci_disable_device(pdev);
}
2227
/* PCI driver registration table: entry points for probe/remove and PM */
static struct pci_driver atl1_driver = {
        .name = ATLX_DRIVER_NAME,
        .id_table = atl1_pci_tbl,
        .probe = atl1_probe,
        .remove = __devexit_p(atl1_remove),
        .suspend = atl1_suspend,
        .resume = atl1_resume
};
2236
2237 /*
2238  * atl1_exit_module - Driver Exit Cleanup Routine
2239  *
2240  * atl1_exit_module is called just before the driver is removed
2241  * from memory.
2242  */
static void __exit atl1_exit_module(void)
{
        /* detach the driver from the PCI core; triggers atl1_remove() */
        pci_unregister_driver(&atl1_driver);
}
2247
2248 /*
2249  * atl1_init_module - Driver Registration Routine
2250  *
2251  * atl1_init_module is the first routine called when the driver is
2252  * loaded. All it does is register with the PCI subsystem.
2253  */
static int __init atl1_init_module(void)
{
        /* registration with the PCI core triggers atl1_probe() per device */
        return pci_register_driver(&atl1_driver);
}
2258
/* register module entry/exit points with the kernel */
module_init(atl1_init_module);
module_exit(atl1_exit_module);
2261
/*
 * One ethtool statistics table entry: maps a display name to the size
 * and byte offset of a counter inside struct atl1_adapter.
 */
struct atl1_stats {
        char stat_string[ETH_GSTRING_LEN];      /* name shown by ethtool -S */
        int sizeof_stat;        /* sizeof() the counter field (u32 or u64) */
        int stat_offset;        /* offsetof() the counter in atl1_adapter */
};
2267
/* expand an adapter member into its (sizeof, offsetof) initializer pair */
#define ATL1_STAT(m) \
        sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
2270
/* counters reported by ethtool -S; offsets resolved via ATL1_STAT() */
static struct atl1_stats atl1_gstrings_stats[] = {
        {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
        {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
        {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
        {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
        {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
        {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
        {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
        {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
        {"multicast", ATL1_STAT(soft_stats.multicast)},
        {"collisions", ATL1_STAT(soft_stats.collisions)},
        {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
        {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
        {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
        {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
        {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
        {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
        {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
        {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
        {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
        {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
        {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
        {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
        {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
        {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
        {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
        {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
        {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
        {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
        {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
        {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
        {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
};
2304
2305 static void atl1_get_ethtool_stats(struct net_device *netdev,
2306         struct ethtool_stats *stats, u64 *data)
2307 {
2308         struct atl1_adapter *adapter = netdev_priv(netdev);
2309         int i;
2310         char *p;
2311
2312         for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2313                 p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
2314                 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
2315                         sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2316         }
2317
2318 }
2319
2320 static int atl1_get_sset_count(struct net_device *netdev, int sset)
2321 {
2322         switch (sset) {
2323         case ETH_SS_STATS:
2324                 return ARRAY_SIZE(atl1_gstrings_stats);
2325         default:
2326                 return -EOPNOTSUPP;
2327         }
2328 }
2329
2330 static int atl1_get_settings(struct net_device *netdev,
2331         struct ethtool_cmd *ecmd)
2332 {
2333         struct atl1_adapter *adapter = netdev_priv(netdev);
2334         struct atl1_hw *hw = &adapter->hw;
2335
2336         ecmd->supported = (SUPPORTED_10baseT_Half |
2337                            SUPPORTED_10baseT_Full |
2338                            SUPPORTED_100baseT_Half |
2339                            SUPPORTED_100baseT_Full |
2340                            SUPPORTED_1000baseT_Full |
2341                            SUPPORTED_Autoneg | SUPPORTED_TP);
2342         ecmd->advertising = ADVERTISED_TP;
2343         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2344             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2345                 ecmd->advertising |= ADVERTISED_Autoneg;
2346                 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
2347                         ecmd->advertising |= ADVERTISED_Autoneg;
2348                         ecmd->advertising |=
2349                             (ADVERTISED_10baseT_Half |
2350                              ADVERTISED_10baseT_Full |
2351                              ADVERTISED_100baseT_Half |
2352                              ADVERTISED_100baseT_Full |
2353                              ADVERTISED_1000baseT_Full);
2354                 } else
2355                         ecmd->advertising |= (ADVERTISED_1000baseT_Full);
2356         }
2357         ecmd->port = PORT_TP;
2358         ecmd->phy_address = 0;
2359         ecmd->transceiver = XCVR_INTERNAL;
2360
2361         if (netif_carrier_ok(adapter->netdev)) {
2362                 u16 link_speed, link_duplex;
2363                 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
2364                 ecmd->speed = link_speed;
2365                 if (link_duplex == FULL_DUPLEX)
2366                         ecmd->duplex = DUPLEX_FULL;
2367                 else
2368                         ecmd->duplex = DUPLEX_HALF;
2369         } else {
2370                 ecmd->speed = -1;
2371                 ecmd->duplex = -1;
2372         }
2373         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2374             hw->media_type == MEDIA_TYPE_1000M_FULL)
2375                 ecmd->autoneg = AUTONEG_ENABLE;
2376         else
2377                 ecmd->autoneg = AUTONEG_DISABLE;
2378
2379         return 0;
2380 }
2381
/*
 * atl1_set_settings - apply ethtool link settings
 * @netdev: network interface device structure
 * @ecmd: requested settings
 *
 * Translates the requested autoneg/speed/duplex into a media type,
 * programs the PHY advertisement and control registers, and restarts
 * the interface.  On any error the previous media type is restored.
 * Returns 0 or a negative errno.
 */
static int atl1_set_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u16 phy_data;
	int ret_val = 0;
	/* remembered so we can roll back on failure */
	u16 old_media_type = hw->media_type;

	if (netif_running(adapter->netdev)) {
		dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
		atl1_down(adapter);
	}

	/* Map the request onto one of the driver's media types. */
	if (ecmd->autoneg == AUTONEG_ENABLE)
		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
	else {
		if (ecmd->speed == SPEED_1000) {
			/* 1000BASE-T half duplex is not a valid mode */
			if (ecmd->duplex != DUPLEX_FULL) {
				dev_warn(&adapter->pdev->dev,
					"can't force to 1000M half duplex\n");
				ret_val = -EINVAL;
				goto exit_sset;
			}
			hw->media_type = MEDIA_TYPE_1000M_FULL;
		} else if (ecmd->speed == SPEED_100) {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_100M_FULL;
			else
				hw->media_type = MEDIA_TYPE_100M_HALF;
		} else {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_10M_FULL;
			else
				hw->media_type = MEDIA_TYPE_10M_HALF;
		}
	}
	/* Report back the advertisement mask implied by the media type. */
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		ecmd->advertising =
		    ADVERTISED_10baseT_Half |
		    ADVERTISED_10baseT_Full |
		    ADVERTISED_100baseT_Half |
		    ADVERTISED_100baseT_Full |
		    ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	case MEDIA_TYPE_1000M_FULL:
		ecmd->advertising =
		    ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	default:
		/* forced 10/100 modes advertise nothing */
		ecmd->advertising = 0;
		break;
	}
	/* Program the PHY's autoneg advertisement registers. */
	if (atl1_phy_setup_autoneg_adv(hw)) {
		ret_val = -EINVAL;
		dev_warn(&adapter->pdev->dev,
			"invalid ethtool speed/duplex setting\n");
		goto exit_sset;
	}
	/* Build the BMCR value: reset plus autoneg or a forced mode. */
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
			    MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
	}
	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
exit_sset:
	/* on error, restore the previous media type */
	if (ret_val)
		hw->media_type = old_media_type;

	if (netif_running(adapter->netdev)) {
		dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
		atl1_up(adapter);
	} else if (!ret_val) {
		dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
		atl1_reset(adapter);
	}
	return ret_val;
}
2481
2482 static void atl1_get_drvinfo(struct net_device *netdev,
2483         struct ethtool_drvinfo *drvinfo)
2484 {
2485         struct atl1_adapter *adapter = netdev_priv(netdev);
2486
2487         strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
2488         strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
2489                 sizeof(drvinfo->version));
2490         strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2491         strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
2492                 sizeof(drvinfo->bus_info));
2493         drvinfo->eedump_len = ATL1_EEDUMP_LEN;
2494 }
2495
2496 static void atl1_get_wol(struct net_device *netdev,
2497         struct ethtool_wolinfo *wol)
2498 {
2499         struct atl1_adapter *adapter = netdev_priv(netdev);
2500
2501         wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
2502         wol->wolopts = 0;
2503         if (adapter->wol & ATLX_WUFC_EX)
2504                 wol->wolopts |= WAKE_UCAST;
2505         if (adapter->wol & ATLX_WUFC_MC)
2506                 wol->wolopts |= WAKE_MCAST;
2507         if (adapter->wol & ATLX_WUFC_BC)
2508                 wol->wolopts |= WAKE_BCAST;
2509         if (adapter->wol & ATLX_WUFC_MAG)
2510                 wol->wolopts |= WAKE_MAGIC;
2511         return;
2512 }
2513
2514 static int atl1_set_wol(struct net_device *netdev,
2515         struct ethtool_wolinfo *wol)
2516 {
2517         struct atl1_adapter *adapter = netdev_priv(netdev);
2518
2519         if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2520                 return -EOPNOTSUPP;
2521         adapter->wol = 0;
2522         if (wol->wolopts & WAKE_UCAST)
2523                 adapter->wol |= ATLX_WUFC_EX;
2524         if (wol->wolopts & WAKE_MCAST)
2525                 adapter->wol |= ATLX_WUFC_MC;
2526         if (wol->wolopts & WAKE_BCAST)
2527                 adapter->wol |= ATLX_WUFC_BC;
2528         if (wol->wolopts & WAKE_MAGIC)
2529                 adapter->wol |= ATLX_WUFC_MAG;
2530         return 0;
2531 }
2532
2533 static int atl1_get_regs_len(struct net_device *netdev)
2534 {
2535         return ATL1_REG_COUNT * sizeof(u32);
2536 }
2537
/*
 * atl1_get_regs - dump the device register space for ethtool
 * @netdev: network interface device structure
 * @regs: ethtool register-dump request (unused here)
 * @p: output buffer of atl1_get_regs_len() bytes
 *
 * Reads every register slot except the reserved regions, which are
 * reported as zero instead of being touched.
 */
static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
	void *p)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	unsigned int i;
	u32 *regbuf = p;

	for (i = 0; i < ATL1_REG_COUNT; i++) {
		/*
		 * This switch statement avoids reserved regions
		 * of register space.
		 */
		switch (i) {
		case 6 ... 9:
		case 14:
		case 29 ... 31:
		case 34 ... 63:
		case 75 ... 127:
		case 136 ... 1023:
		case 1027 ... 1087:
		case 1091 ... 1151:
		case 1194 ... 1195:
		case 1200 ... 1201:
		case 1206 ... 1213:
		case 1216 ... 1279:
		case 1290 ... 1311:
		case 1323 ... 1343:
		case 1358 ... 1359:
		case 1368 ... 1375:
		case 1378 ... 1383:
		case 1388 ... 1391:
		case 1393 ... 1395:
		case 1402 ... 1403:
		case 1410 ... 1471:
		case 1522 ... 1535:
			/* reserved region; don't read it */
			regbuf[i] = 0;
			break;
		default:
			/* unreserved region */
			regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
		}
	}
}
2583
2584 static void atl1_get_ringparam(struct net_device *netdev,
2585         struct ethtool_ringparam *ring)
2586 {
2587         struct atl1_adapter *adapter = netdev_priv(netdev);
2588         struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
2589         struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
2590
2591         ring->rx_max_pending = ATL1_MAX_RFD;
2592         ring->tx_max_pending = ATL1_MAX_TPD;
2593         ring->rx_mini_max_pending = 0;
2594         ring->rx_jumbo_max_pending = 0;
2595         ring->rx_pending = rxdr->count;
2596         ring->tx_pending = txdr->count;
2597         ring->rx_mini_pending = 0;
2598         ring->rx_jumbo_pending = 0;
2599 }
2600
/*
 * atl1_set_ringparam - resize the TX/RX descriptor rings
 * @netdev: network interface device structure
 * @ring: requested ring sizes
 *
 * Clamps the requested counts to [ATL1_MIN_*, ATL1_MAX_*] and rounds
 * them up to a multiple of 4, then reallocates ring resources and
 * restarts the interface if it was running.  On allocation failure the
 * previous rings are restored.  Returns 0 or a negative errno.
 */
static int atl1_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;

	struct atl1_tpd_ring tpd_old, tpd_new;
	struct atl1_rfd_ring rfd_old, rfd_new;
	struct atl1_rrd_ring rrd_old, rrd_new;
	struct atl1_ring_header rhdr_old, rhdr_new;
	int err;

	/* snapshot the current rings so we can roll back on failure */
	tpd_old = adapter->tpd_ring;
	rfd_old = adapter->rfd_ring;
	rrd_old = adapter->rrd_ring;
	rhdr_old = adapter->ring_header;

	if (netif_running(adapter->netdev))
		atl1_down(adapter);

	/* clamp the RX count and round up to a multiple of 4 */
	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
			rfdr->count;
	rfdr->count = (rfdr->count + 3) & ~3;
	/* the RX return ring always mirrors the free-descriptor ring */
	rrdr->count = rfdr->count;

	/* same clamping for the TX ring */
	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
			tpdr->count;
	tpdr->count = (tpdr->count + 3) & ~3;

	if (netif_running(adapter->netdev)) {
		/* try to get new resources before deleting old */
		err = atl1_setup_ring_resources(adapter);
		if (err)
			goto err_setup_ring;

		/*
		 * save the new, restore the old in order to free it,
		 * then restore the new back again
		 */

		rfd_new = adapter->rfd_ring;
		rrd_new = adapter->rrd_ring;
		tpd_new = adapter->tpd_ring;
		rhdr_new = adapter->ring_header;
		adapter->rfd_ring = rfd_old;
		adapter->rrd_ring = rrd_old;
		adapter->tpd_ring = tpd_old;
		adapter->ring_header = rhdr_old;
		atl1_free_ring_resources(adapter);
		adapter->rfd_ring = rfd_new;
		adapter->rrd_ring = rrd_new;
		adapter->tpd_ring = tpd_new;
		adapter->ring_header = rhdr_new;

		err = atl1_up(adapter);
		if (err)
			return err;
	}
	return 0;

err_setup_ring:
	/* allocation failed: restore the old rings and restart */
	adapter->rfd_ring = rfd_old;
	adapter->rrd_ring = rrd_old;
	adapter->tpd_ring = tpd_old;
	adapter->ring_header = rhdr_old;
	/* NOTE(review): atl1_up()'s return value is ignored here;
	 * the original allocation error is reported instead -- confirm. */
	atl1_up(adapter);
	return err;
}
2673
2674 static void atl1_get_pauseparam(struct net_device *netdev,
2675         struct ethtool_pauseparam *epause)
2676 {
2677         struct atl1_adapter *adapter = netdev_priv(netdev);
2678         struct atl1_hw *hw = &adapter->hw;
2679
2680         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2681             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2682                 epause->autoneg = AUTONEG_ENABLE;
2683         } else {
2684                 epause->autoneg = AUTONEG_DISABLE;
2685         }
2686         epause->rx_pause = 1;
2687         epause->tx_pause = 1;
2688 }
2689
/*
 * atl1_set_pauseparam - ethtool set_pauseparam handler
 * @netdev: network interface device structure
 * @epause: requested flow-control settings
 *
 * NOTE(review): this handler only writes the current state back into
 * @epause and never applies the requested rx/tx pause or autoneg
 * settings to the hardware -- confirm whether that is intentional.
 */
static int atl1_set_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		epause->autoneg = AUTONEG_ENABLE;
	} else {
		epause->autoneg = AUTONEG_DISABLE;
	}

	epause->rx_pause = 1;
	epause->tx_pause = 1;

	return 0;
}
2708
/* FIXME: is this right? -- CHS */
/*
 * atl1_get_rx_csum - report RX checksum-offload state to ethtool
 *
 * Always reports enabled; this driver exposes no way to disable the
 * feature.
 */
static u32 atl1_get_rx_csum(struct net_device *netdev)
{
	return 1;
}
2714
2715 static void atl1_get_strings(struct net_device *netdev, u32 stringset,
2716         u8 *data)
2717 {
2718         u8 *p = data;
2719         int i;
2720
2721         switch (stringset) {
2722         case ETH_SS_STATS:
2723                 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2724                         memcpy(p, atl1_gstrings_stats[i].stat_string,
2725                                 ETH_GSTRING_LEN);
2726                         p += ETH_GSTRING_LEN;
2727                 }
2728                 break;
2729         }
2730 }
2731
2732 static int atl1_nway_reset(struct net_device *netdev)
2733 {
2734         struct atl1_adapter *adapter = netdev_priv(netdev);
2735         struct atl1_hw *hw = &adapter->hw;
2736
2737         if (netif_running(netdev)) {
2738                 u16 phy_data;
2739                 atl1_down(adapter);
2740
2741                 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2742                         hw->media_type == MEDIA_TYPE_1000M_FULL) {
2743                         phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2744                 } else {
2745                         switch (hw->media_type) {
2746                         case MEDIA_TYPE_100M_FULL:
2747                                 phy_data = MII_CR_FULL_DUPLEX |
2748                                         MII_CR_SPEED_100 | MII_CR_RESET;
2749                                 break;
2750                         case MEDIA_TYPE_100M_HALF:
2751                                 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2752                                 break;
2753                         case MEDIA_TYPE_10M_FULL:
2754                                 phy_data = MII_CR_FULL_DUPLEX |
2755                                         MII_CR_SPEED_10 | MII_CR_RESET;
2756                                 break;
2757                         default:
2758                                 /* MEDIA_TYPE_10M_HALF */
2759                                 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2760                         }
2761                 }
2762                 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2763                 atl1_up(adapter);
2764         }
2765         return 0;
2766 }
2767
/* ethtool entry points for this driver, hooked up at probe time */
const struct ethtool_ops atl1_ethtool_ops = {
	.get_settings           = atl1_get_settings,
	.set_settings           = atl1_set_settings,
	.get_drvinfo            = atl1_get_drvinfo,
	.get_wol                = atl1_get_wol,
	.set_wol                = atl1_set_wol,
	.get_regs_len           = atl1_get_regs_len,
	.get_regs               = atl1_get_regs,
	.get_ringparam          = atl1_get_ringparam,
	.set_ringparam          = atl1_set_ringparam,
	.get_pauseparam         = atl1_get_pauseparam,
	.set_pauseparam         = atl1_set_pauseparam,
	.get_rx_csum            = atl1_get_rx_csum,
	.set_tx_csum            = ethtool_op_set_tx_hw_csum,
	.get_link               = ethtool_op_get_link,
	.set_sg                 = ethtool_op_set_sg,
	.get_strings            = atl1_get_strings,
	.nway_reset             = atl1_nway_reset,
	.get_ethtool_stats      = atl1_get_ethtool_stats,
	.get_sset_count         = atl1_get_sset_count,
	.set_tso                = ethtool_op_set_tso,
};
2790
/*
 * atl1_reset_hw - reset the transmit and receive units
 * @hw: struct containing variables accessed by shared code
 *
 * Issues a soft reset to the MAC, re-enables the PHY and waits for the
 * chip to go idle.  Returns 0 on success, or the non-zero idle status
 * register value if the chip fails to reach idle within ~10ms.
 */
s32 atl1_reset_hw(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	u32 icr;
	int i;

	/*
	 * Clear Interrupt mask to stop board from generating
	 * interrupts & Clear any pending interrupt events
	 */
	/*
	 * iowrite32(0, hw->hw_addr + REG_IMR);
	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
	 */

	/*
	 * Issue Soft Reset to the MAC.  This will reset the chip's
	 * transmit, receive, DMA.  It will not effect
	 * the current PCI configuration.  The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
	/* read back to flush the posted write */
	ioread32(hw->hw_addr + REG_MASTER_CTRL);

	iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
	ioread16(hw->hw_addr + REG_PHY_ENABLE);

	/* delay about 1ms */
	msleep(1);

	/* Wait at least 10ms for All module to be Idle */
	for (i = 0; i < 10; i++) {
		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
		if (!icr)
			break;
		/* delay 1 ms */
		msleep(1);
		/* FIXME: still the right way to do this? */
		cpu_relax();
	}

	/* a non-zero idle status after the wait means the reset failed */
	if (icr) {
		dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
		return icr;
	}

	return 0;
}
2844
2845 /* function about EEPROM
2846  *
2847  * check_eeprom_exist
2848  * return 0 if eeprom exist
2849  */
2850 static int atl1_check_eeprom_exist(struct atl1_hw *hw)
2851 {
2852         u32 value;
2853         value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2854         if (value & SPI_FLASH_CTRL_EN_VPD) {
2855                 value &= ~SPI_FLASH_CTRL_EN_VPD;
2856                 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2857         }
2858
2859         value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
2860         return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2861 }
2862
2863 static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
2864 {
2865         int i;
2866         u32 control;
2867
2868         if (offset & 3)
2869                 /* address do not align */
2870                 return false;
2871
2872         iowrite32(0, hw->hw_addr + REG_VPD_DATA);
2873         control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2874         iowrite32(control, hw->hw_addr + REG_VPD_CAP);
2875         ioread32(hw->hw_addr + REG_VPD_CAP);
2876
2877         for (i = 0; i < 10; i++) {
2878                 msleep(2);
2879                 control = ioread32(hw->hw_addr + REG_VPD_CAP);
2880                 if (control & VPD_CAP_VPD_FLAG)
2881                         break;
2882         }
2883         if (control & VPD_CAP_VPD_FLAG) {
2884                 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
2885                 return true;
2886         }
2887         /* timeout */
2888         return false;
2889 }
2890
/*
 * atl1_read_phy_reg - read the value of a PHY register over MDIO
 * @hw: struct containing variables accessed by shared code
 * @reg_addr: address of the PHY register to read
 * @phy_data: output; the 16-bit value read on success
 *
 * Returns 0 on success or ATLX_ERR_PHY if the MDIO transaction does
 * not complete within MDIO_WAIT_TIMES polls.
 */
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	/* start a read (MDIO_RW) transaction with preamble */
	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
		MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	/* read back to flush the posted write */
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	/* poll until the controller clears the START/BUSY bits */
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}
	if (!(val & (MDIO_START | MDIO_BUSY))) {
		/* low 16 bits of the control register hold the data */
		*phy_data = (u16) val;
		return 0;
	}
	/* timed out */
	return ATLX_ERR_PHY;
}
2919
2920 #define CUSTOM_SPI_CS_SETUP     2
2921 #define CUSTOM_SPI_CLK_HI       2
2922 #define CUSTOM_SPI_CLK_LO       2
2923 #define CUSTOM_SPI_CS_HOLD      2
2924 #define CUSTOM_SPI_CS_HI        3
2925
2926 static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
2927 {
2928         int i;
2929         u32 value;
2930
2931         iowrite32(0, hw->hw_addr + REG_SPI_DATA);
2932         iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
2933
2934         value = SPI_FLASH_CTRL_WAIT_READY |
2935             (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
2936             SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
2937                                              SPI_FLASH_CTRL_CLK_HI_MASK) <<
2938             SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
2939                                            SPI_FLASH_CTRL_CLK_LO_MASK) <<
2940             SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
2941                                            SPI_FLASH_CTRL_CS_HOLD_MASK) <<
2942             SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
2943                                             SPI_FLASH_CTRL_CS_HI_MASK) <<
2944             SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
2945             SPI_FLASH_CTRL_INS_SHIFT;
2946
2947         iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2948
2949         value |= SPI_FLASH_CTRL_START;
2950         iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2951         ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2952
2953         for (i = 0; i < 10; i++) {
2954                 msleep(1);
2955                 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2956                 if (!(value & SPI_FLASH_CTRL_START))
2957                         break;
2958         }
2959
2960         if (value & SPI_FLASH_CTRL_START)
2961                 return false;
2962
2963         *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
2964
2965         return true;
2966 }
2967
/*
 * atl1_get_permanent_address - fetch the adapter's permanent MAC address
 * @hw: struct containing variables accessed by shared code
 *
 * Tries, in order: the VPD EEPROM, SPI flash, and finally the MAC
 * station-address register (some BIOSes program it directly at POST).
 * Returns 0 when hw->perm_mac_addr holds a valid address, 1 otherwise.
 */
static int atl1_get_permanent_address(struct atl1_hw *hw)
{
	u32 addr[2];
	u32 i, control;
	u16 reg;
	u8 eth_addr[ETH_ALEN];
	bool key_valid;

	/* nothing to do if a valid address is already cached */
	if (is_valid_ether_addr(hw->perm_mac_addr))
		return 0;

	/* init */
	addr[0] = addr[1] = 0;

	if (!atl1_check_eeprom_exist(hw)) {
		reg = 0;
		key_valid = false;
		/*
		 * Walk the EEPROM key/value records: a dword whose low
		 * byte is 0x5A carries a register offset in its high
		 * half; the next dword is that register's value.
		 */
		i = 0;
		while (1) {
			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
				if (key_valid) {
					if (reg == REG_MAC_STA_ADDR)
						addr[0] = control;
					else if (reg == (REG_MAC_STA_ADDR + 4))
						addr[1] = control;
					key_valid = false;
				} else if ((control & 0xff) == 0x5A) {
					key_valid = true;
					reg = (u16) (control >> 16);
				} else
					break;
			} else
				/* read error */
				break;
			i += 4;
		}

		/* assemble the 6-byte MAC from the two byte-swapped words */
		*(u32 *) &eth_addr[2] = swab32(addr[0]);
		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
		if (is_valid_ether_addr(eth_addr)) {
			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
			return 0;
		}
		return 1;
	}

	/* see if SPI FLAGS exist ? */
	addr[0] = addr[1] = 0;
	reg = 0;
	key_valid = false;
	i = 0;
	/* same key/value walk as above, but over SPI flash at 0x1f000 */
	while (1) {
		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
			if (key_valid) {
				if (reg == REG_MAC_STA_ADDR)
					addr[0] = control;
				else if (reg == (REG_MAC_STA_ADDR + 4))
					addr[1] = control;
				key_valid = false;
			} else if ((control & 0xff) == 0x5A) {
				key_valid = true;
				reg = (u16) (control >> 16);
			} else
				/* data end */
				break;
		} else
			/* read error */
			break;
		i += 4;
	}

	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	/*
	 * On some motherboards, the MAC address is written by the
	 * BIOS directly to the MAC register during POST, and is
	 * not stored in eeprom.  If all else thus far has failed
	 * to fetch the permanent MAC address, try reading it directly.
	 */
	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	return 1;
}
3068
3069 /*
3070  * Reads the adapter's MAC address from the EEPROM
3071  * hw - Struct containing variables accessed by shared code
3072  */
3073 s32 atl1_read_mac_addr(struct atl1_hw *hw)
3074 {
3075         u16 i;
3076
3077         if (atl1_get_permanent_address(hw))
3078                 random_ether_addr(hw->perm_mac_addr);
3079
3080         for (i = 0; i < ETH_ALEN; i++)
3081                 hw->mac_addr[i] = hw->perm_mac_addr[i];
3082         return 0;
3083 }
3084
3085 /*
3086  * Hashes an address to determine its location in the multicast table
3087  * hw - Struct containing variables accessed by shared code
3088  * mc_addr - the multicast address to hash
3089  *
3090  * atl1_hash_mc_addr
3091  *  purpose
3092  *      set hash value for a multicast address
3093  *      hash calcu processing :
3094  *          1. calcu 32bit CRC for multicast address
3095  *          2. reverse crc with MSB to LSB
3096  */
3097 u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
3098 {
3099         u32 crc32, value = 0;
3100         int i;
3101
3102         crc32 = ether_crc_le(6, mc_addr);
3103         for (i = 0; i < 32; i++)
3104                 value |= (((crc32 >> i) & 1) << (31 - i));
3105
3106         return value;
3107 }
3108
/*
 * atl1_hash_set - set the multicast-table bit for a hash value
 * @hw: struct containing variables accessed by shared code
 * @hash_value: multicast address hash value (from atl1_hash_mc_addr)
 */
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The hash table is a register array of 2 32-bit registers,
	 * treated as an array of 64 bits; we want to set
	 * BitArray[hash_value].  We read the register, OR in the new
	 * bit, then write back the new value.  The register is selected
	 * by the MSB of the hash value, and the bit within that
	 * register by bits 30:26 of the value.
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;
	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= (1 << hash_bit);
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}
3134
/*
 * atl1_write_phy_reg - write a value to a PHY register over MDIO
 * @hw: struct containing variables accessed by shared code
 * @reg_addr: address of the PHY register to write
 * @phy_data: data to write to the PHY
 *
 * Returns 0 on success or ATLX_ERR_PHY if the MDIO transaction does
 * not complete within MDIO_WAIT_TIMES polls.
 */
s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
{
	int i;
	u32 val;

	/* start a write transaction (no MDIO_RW bit) with preamble */
	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
	    (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
	    MDIO_SUP_PREAMBLE |
	    MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	/* read back to flush the posted write */
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	/* poll until the controller clears the START/BUSY bits */
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}

	if (!(val & (MDIO_START | MDIO_BUSY)))
		return 0;

	/* timed out */
	return ATLX_ERR_PHY;
}
3165
/*
 * Take the L001's PHY out of its power-saving state (hardware bug
 * workaround).
 * hw - Struct containing variables accessed by shared code
 * At power-on the L001's PHY always comes up in the power-saving
 * state, in which gigabit link is forbidden.
 */
3172 static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
3173 {
3174         s32 ret;
3175         ret = atl1_write_phy_reg(hw, 29, 0x0029);
3176         if (ret)
3177                 return ret;
3178         return atl1_write_phy_reg(hw, 30, 0);
3179 }
3180
3181 /*
3182  *TODO: do something or get rid of this
3183  */
3184 s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
3185 {
3186 /*    s32 ret_val;
3187  *    u16 phy_data;
3188  */
3189
3190 /*
3191     ret_val = atl1_write_phy_reg(hw, ...);
3192     ret_val = atl1_write_phy_reg(hw, ...);
3193     ....
3194 */
3195         return 0;
3196 }
3197
/*
 * Resets the PHY and makes all configuration take effect
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bits 15 and 12 of the MII Control register (for F001 bug)
 */
3204 static s32 atl1_phy_reset(struct atl1_hw *hw)
3205 {
3206         struct pci_dev *pdev = hw->back->pdev;
3207         s32 ret_val;
3208         u16 phy_data;
3209
3210         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3211             hw->media_type == MEDIA_TYPE_1000M_FULL)
3212                 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3213         else {
3214                 switch (hw->media_type) {
3215                 case MEDIA_TYPE_100M_FULL:
3216                         phy_data =
3217                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
3218                             MII_CR_RESET;
3219                         break;
3220                 case MEDIA_TYPE_100M_HALF:
3221                         phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3222                         break;
3223                 case MEDIA_TYPE_10M_FULL:
3224                         phy_data =
3225                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
3226                         break;
3227                 default:
3228                         /* MEDIA_TYPE_10M_HALF: */
3229                         phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3230                         break;
3231                 }
3232         }
3233
3234         ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3235         if (ret_val) {
3236                 u32 val;
3237                 int i;
3238                 /* pcie serdes link may be down! */
3239                 dev_dbg(&pdev->dev, "pcie phy link down\n");
3240
3241                 for (i = 0; i < 25; i++) {
3242                         msleep(1);
3243                         val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3244                         if (!(val & (MDIO_START | MDIO_BUSY)))
3245                                 break;
3246                 }
3247
3248                 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
3249                         dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
3250                         return ret_val;
3251                 }
3252         }
3253         return 0;
3254 }
3255
3256 /*
3257  * Configures PHY autoneg and flow control advertisement settings
3258  * hw - Struct containing variables accessed by shared code
3259  */
3260 s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
3261 {
3262         s32 ret_val;
3263         s16 mii_autoneg_adv_reg;
3264         s16 mii_1000t_ctrl_reg;
3265
3266         /* Read the MII Auto-Neg Advertisement Register (Address 4). */
3267         mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
3268
3269         /* Read the MII 1000Base-T Control Register (Address 9). */
3270         mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
3271
3272         /*
3273          * First we clear all the 10/100 mb speed bits in the Auto-Neg
3274          * Advertisement Register (Address 4) and the 1000 mb speed bits in
3275          * the  1000Base-T Control Register (Address 9).
3276          */
3277         mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
3278         mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
3279
3280         /*
3281          * Need to parse media_type  and set up
3282          * the appropriate PHY registers.
3283          */
3284         switch (hw->media_type) {
3285         case MEDIA_TYPE_AUTO_SENSOR:
3286                 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
3287                                         MII_AR_10T_FD_CAPS |
3288                                         MII_AR_100TX_HD_CAPS |
3289                                         MII_AR_100TX_FD_CAPS);
3290                 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3291                 break;
3292
3293         case MEDIA_TYPE_1000M_FULL:
3294                 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3295                 break;
3296
3297         case MEDIA_TYPE_100M_FULL:
3298                 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
3299                 break;
3300
3301         case MEDIA_TYPE_100M_HALF:
3302                 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
3303                 break;
3304
3305         case MEDIA_TYPE_10M_FULL:
3306                 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
3307                 break;
3308
3309         default:
3310                 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
3311                 break;
3312         }
3313
3314         /* flow control fixed to enable all */
3315         mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
3316
3317         hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
3318         hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
3319
3320         ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
3321         if (ret_val)
3322                 return ret_val;
3323
3324         ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
3325         if (ret_val)
3326                 return ret_val;
3327
3328         return 0;
3329 }
3330
3331 /*
3332  * Configures link settings.
3333  * hw - Struct containing variables accessed by shared code
3334  * Assumes the hardware has previously been reset and the
3335  * transmitter and receiver are not enabled.
3336  */
3337 static s32 atl1_setup_link(struct atl1_hw *hw)
3338 {
3339         struct pci_dev *pdev = hw->back->pdev;
3340         s32 ret_val;
3341
3342         /*
3343          * Options:
3344          *  PHY will advertise value(s) parsed from
3345          *  autoneg_advertised and fc
3346          *  no matter what autoneg is , We will not wait link result.
3347          */
3348         ret_val = atl1_phy_setup_autoneg_adv(hw);
3349         if (ret_val) {
3350                 dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
3351                 return ret_val;
3352         }
3353         /* SW.Reset , En-Auto-Neg if needed */
3354         ret_val = atl1_phy_reset(hw);
3355         if (ret_val) {
3356                 dev_dbg(&pdev->dev, "error resetting phy\n");
3357                 return ret_val;
3358         }
3359         hw->phy_configured = true;
3360         return ret_val;
3361 }
3362
/*
 * Program the SPI flash opcode registers from the flash_table entry
 * for the detected flash vendor; unknown vendor indices fall back to
 * entry 0 (Atmel).
 */
static void atl1_init_flash_opcode(struct atl1_hw *hw)
{
	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
		/* Atmel */
		hw->flash_vendor = 0;

	/* Init OP table: one opcode byte per SPI flash command register */
	iowrite8(flash_table[hw->flash_vendor].cmd_program,
		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_read,
		hw->hw_addr + REG_SPI_FLASH_OP_READ);
}
3387
3388 /*
3389  * Performs basic configuration of the adapter.
3390  * hw - Struct containing variables accessed by shared code
3391  * Assumes that the controller has previously been reset and is in a
3392  * post-reset uninitialized state. Initializes multicast table,
3393  * and  Calls routines to setup link
3394  * Leaves the transmit and receive units disabled and uninitialized.
3395  */
3396 s32 atl1_init_hw(struct atl1_hw *hw)
3397 {
3398         u32 ret_val = 0;
3399
3400         /* Zero out the Multicast HASH table */
3401         iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
3402         /* clear the old settings from the multicast hash table */
3403         iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
3404
3405         atl1_init_flash_opcode(hw);
3406
3407         if (!hw->phy_configured) {
3408                 /* enable GPHY LinkChange Interrrupt */
3409                 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
3410                 if (ret_val)
3411                         return ret_val;
3412                 /* make PHY out of power-saving state */
3413                 ret_val = atl1_phy_leave_power_saving(hw);
3414                 if (ret_val)
3415                         return ret_val;
3416                 /* Call a subroutine to configure the link */
3417                 ret_val = atl1_setup_link(hw);
3418         }
3419         return ret_val;
3420 }
3421
3422 /*
3423  * Detects the current speed and duplex settings of the hardware.
3424  * hw - Struct containing variables accessed by shared code
3425  * speed - Speed of the connection
3426  * duplex - Duplex setting of the connection
3427  */
3428 s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
3429 {
3430         struct pci_dev *pdev = hw->back->pdev;
3431         s32 ret_val;
3432         u16 phy_data;
3433
3434         /* ; --- Read   PHY Specific Status Register (17) */
3435         ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
3436         if (ret_val)
3437                 return ret_val;
3438
3439         if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
3440                 return ATLX_ERR_PHY_RES;
3441
3442         switch (phy_data & MII_ATLX_PSSR_SPEED) {
3443         case MII_ATLX_PSSR_1000MBS:
3444                 *speed = SPEED_1000;
3445                 break;
3446         case MII_ATLX_PSSR_100MBS:
3447                 *speed = SPEED_100;
3448                 break;
3449         case MII_ATLX_PSSR_10MBS:
3450                 *speed = SPEED_10;
3451                 break;
3452         default:
3453                 dev_dbg(&pdev->dev, "error getting speed\n");
3454                 return ATLX_ERR_PHY_SPEED;
3455                 break;
3456         }
3457         if (phy_data & MII_ATLX_PSSR_DPLX)
3458                 *duplex = FULL_DUPLEX;
3459         else
3460                 *duplex = HALF_DUPLEX;
3461
3462         return 0;
3463 }
3464
3465 void atl1_set_mac_addr(struct atl1_hw *hw)
3466 {
3467         u32 value;
3468         /*
3469          * 00-0B-6A-F6-00-DC
3470          * 0:  6AF600DC   1: 000B
3471          * low dword
3472          */
3473         value = (((u32) hw->mac_addr[2]) << 24) |
3474             (((u32) hw->mac_addr[3]) << 16) |
3475             (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
3476         iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
3477         /* high dword */
3478         value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
3479         iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
3480 }