/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <xiong_huang@attansic.com>
 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
 * Xinzhu  302, TAIWAN, REPUBLIC OF CHINA
 *
 * Chris Snook <csnook@redhat.com>
 * Jay Cliburn <jcliburn@gmail.com>
 *
 * This version is adapted from the Attansic reference driver for
 * inclusion in the Linux kernel.  It is currently under heavy development.
 * A very incomplete list of things that need to be dealt with:
 *
 * TODO:
 * Wake on LAN.
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 *      http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 *
 * NEEDS TESTING:
 * VLAN
 * multicast
 * promiscuous mode
 * interrupt coalescing
 * SMP torture testing
 */

#include <asm/atomic.h>
#include <asm/byteorder.h>

#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include "atl1.h"

/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"

/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id atl1_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
        /* required last entry */
        {0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);

/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

        adapter->wol = 0;
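        /* rx buffer length: max frame size rounded up to an 8-byte multiple */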
        adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
        adapter->ict = 50000;           /* 100ms */
        adapter->link_speed = SPEED_0;  /* hardware init */
        adapter->link_duplex = FULL_DUPLEX;

        hw->phy_configured = false;
        hw->preamble_len = 7;
        hw->ipgt = 0x60;
        hw->min_ifg = 0x50;
        hw->ipgr1 = 0x40;
        hw->ipgr2 = 0x60;
        hw->max_retry = 0xf;
        hw->lcol = 0x37;
        hw->jam_ipg = 7;
        hw->rfd_burst = 8;
        hw->rrd_burst = 8;
        hw->rfd_fetch_gap = 1;
        hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
        hw->rx_jumbo_lkah = 1;
        hw->rrd_ret_timer = 16;
        hw->tpd_burst = 4;
        hw->tpd_fetch_th = 16;
        hw->txf_burst = 0x100;
        hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
        hw->tpd_fetch_gap = 1;
        hw->rcb_value = atl1_rcb_64;
        hw->dma_ord = atl1_dma_ord_enh;
        hw->dmar_block = atl1_dma_req_256;
        hw->dmaw_block = atl1_dma_req_256;
        hw->cmb_rrd = 4;
        hw->cmb_tpd = 4;
        hw->cmb_rx_timer = 1;   /* about 2us */
        hw->cmb_tx_timer = 1;   /* about 2us */
        hw->smb_timer = 100000; /* about 200ms */

        spin_lock_init(&adapter->lock);
        spin_lock_init(&adapter->mb_lock);

        return 0;
}

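/*
 * MII register access callbacks, used by the generic MII ioctl
 * support (generic_mii_ioctl) below.
 */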
static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        u16 result;

        atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);

        return result;
}

static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
        int val)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);

        atl1_write_phy_reg(&adapter->hw, reg_num, val);
}

/*
 * atl1_mii_ioctl - perform a MII ioctl on the PHY
 * @netdev: network interface device structure
 * @ifr: interface request structure holding the MII register data
 * @cmd: MII ioctl command
 */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
        int retval;

        if (!netif_running(netdev))
                return -EINVAL;

        spin_lock_irqsave(&adapter->lock, flags);
        retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
        spin_unlock_irqrestore(&adapter->lock, flags);

        return retval;
}

/*
 * atl1_setup_ring_resources - allocate Tx / Rx descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_ring_header *ring_header = &adapter->ring_header;
        struct pci_dev *pdev = adapter->pdev;
        int size;
        u8 offset = 0;

        size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
        tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
        if (unlikely(!tpd_ring->buffer_info)) {
                dev_err(&pdev->dev, "kzalloc failed, size = %d\n", size);
                goto err_nomem;
        }
        rfd_ring->buffer_info =
                (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);

        /*
         * real ring DMA buffer
         * each ring/block may need up to 8 bytes for alignment, hence the
         * additional 40 bytes tacked onto the end.
         */
        ring_header->size = size =
                sizeof(struct tx_packet_desc) * tpd_ring->count
                + sizeof(struct rx_free_desc) * rfd_ring->count
                + sizeof(struct rx_return_desc) * rrd_ring->count
                + sizeof(struct coals_msg_block)
                + sizeof(struct stats_msg_block)
                + 40;

        ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
                &ring_header->dma);
        if (unlikely(!ring_header->desc)) {
                dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
                goto err_nomem;
        }

        memset(ring_header->desc, 0, ring_header->size);

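        /*
         * Carve the single DMA allocation into the individual rings and
         * message blocks.  Each piece is aligned up to the next 8-byte
         * boundary: if its DMA address has any of the low three bits set,
         * both the bus address and the CPU pointer advance by 8 - (addr & 7).
         */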
        /* init TPD ring */
        tpd_ring->dma = ring_header->dma;
        offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
        tpd_ring->dma += offset;
        tpd_ring->desc = (u8 *) ring_header->desc + offset;
        tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

        /* init RFD ring */
        rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
        offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
        rfd_ring->dma += offset;
        rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
        rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;

        /* init RRD ring */
        rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
        offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
        rrd_ring->dma += offset;
        rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
        rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;

        /* init CMB */
        adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
        offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
        adapter->cmb.dma += offset;
        adapter->cmb.cmb = (struct coals_msg_block *)
                ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

        /* init SMB */
        adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
        offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
        adapter->smb.dma += offset;
        adapter->smb.smb = (struct stats_msg_block *)
                ((u8 *) adapter->cmb.cmb +
                (sizeof(struct coals_msg_block) + offset));

        return 0;

err_nomem:
        kfree(tpd_ring->buffer_info);
        return -ENOMEM;
}

static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

        atomic_set(&tpd_ring->next_to_use, 0);
        atomic_set(&tpd_ring->next_to_clean, 0);

        rfd_ring->next_to_clean = 0;
        atomic_set(&rfd_ring->next_to_use, 0);

        rrd_ring->next_to_use = 0;
        atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rfd_ring->count; i++) {
                buffer_info = &rfd_ring->buffer_info[i];
                if (buffer_info->dma) {
                        pci_unmap_page(pdev, buffer_info->dma,
                                buffer_info->length, PCI_DMA_FROMDEVICE);
                        buffer_info->dma = 0;
                }
                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct atl1_buffer) * rfd_ring->count;
        memset(rfd_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rfd_ring->desc, 0, rfd_ring->size);

        rfd_ring->next_to_clean = 0;
        atomic_set(&rfd_ring->next_to_use, 0);

        rrd_ring->next_to_use = 0;
        atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tpd_ring->count; i++) {
                buffer_info = &tpd_ring->buffer_info[i];
                if (buffer_info->dma) {
                        pci_unmap_page(pdev, buffer_info->dma,
                                buffer_info->length, PCI_DMA_TODEVICE);
                        buffer_info->dma = 0;
                }
        }

        for (i = 0; i < tpd_ring->count; i++) {
                buffer_info = &tpd_ring->buffer_info[i];
                if (buffer_info->skb) {
                        dev_kfree_skb_any(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct atl1_buffer) * tpd_ring->count;
        memset(tpd_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(tpd_ring->desc, 0, tpd_ring->size);

        atomic_set(&tpd_ring->next_to_use, 0);
        atomic_set(&tpd_ring->next_to_clean, 0);
}

/*
 * atl1_free_ring_resources - Free Tx / Rx descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit and receive software resources
 */
void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_ring_header *ring_header = &adapter->ring_header;

        atl1_clean_tx_ring(adapter);
        atl1_clean_rx_ring(adapter);

        kfree(tpd_ring->buffer_info);
        pci_free_consistent(pdev, ring_header->size, ring_header->desc,
                ring_header->dma);

        tpd_ring->buffer_info = NULL;
        tpd_ring->desc = NULL;
        tpd_ring->dma = 0;

        rfd_ring->buffer_info = NULL;
        rfd_ring->desc = NULL;
        rfd_ring->dma = 0;

        rrd_ring->desc = NULL;
        rrd_ring->dma = 0;
}

static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
        u32 value;
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        /* Config MAC CTRL Register */
        value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
        /* duplex */
        if (FULL_DUPLEX == adapter->link_duplex)
                value |= MAC_CTRL_DUPLX;
        /* speed */
        value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
                         MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
                  MAC_CTRL_SPEED_SHIFT);
        /* flow control */
        value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
        /* PAD & CRC */
        value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
        /* preamble length */
        value |= (((u32) adapter->hw.preamble_len
                   & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
        /* vlan */
        if (adapter->vlgrp)
                value |= MAC_CTRL_RMV_VLAN;
        /* rx checksum
           if (adapter->rx_csum)
           value |= MAC_CTRL_RX_CHKSUM_EN;
         */
        /* filter mode */
        value |= MAC_CTRL_BC_EN;
        if (netdev->flags & IFF_PROMISC)
                value |= MAC_CTRL_PROMIS_EN;
        else if (netdev->flags & IFF_ALLMULTI)
                value |= MAC_CTRL_MC_ALL_EN;
        /* value |= MAC_CTRL_LOOPBACK; */
        iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}

static u32 atl1_check_link(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 ret_val;
        u16 speed, duplex, phy_data;
        int reconfig = 0;

        /*
         * MII_BMSR must be read twice: link status is latched, so the
         * first read may return a stale value.
         */
        atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
        atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
        if (!(phy_data & BMSR_LSTATUS)) {
                /* link down */
                if (netif_carrier_ok(netdev)) {
                        /* old link state: Up */
                        dev_info(&adapter->pdev->dev, "link is down\n");
                        adapter->link_speed = SPEED_0;
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                }
                return 0;
        }

        /* Link Up */
        ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
        if (ret_val)
                return ret_val;

        switch (hw->media_type) {
        case MEDIA_TYPE_1000M_FULL:
                if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_100M_FULL:
                if (speed != SPEED_100 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_100M_HALF:
                if (speed != SPEED_100 || duplex != HALF_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_10M_FULL:
                if (speed != SPEED_10 || duplex != FULL_DUPLEX)
                        reconfig = 1;
                break;
        case MEDIA_TYPE_10M_HALF:
                if (speed != SPEED_10 || duplex != HALF_DUPLEX)
                        reconfig = 1;
                break;
        }

        /* the negotiated link matches our configured setting */
        if (!reconfig) {
                if (adapter->link_speed != speed
                    || adapter->link_duplex != duplex) {
                        adapter->link_speed = speed;
                        adapter->link_duplex = duplex;
                        atl1_setup_mac_ctrl(adapter);
                        dev_info(&adapter->pdev->dev,
                                "%s link is up %d Mbps %s\n",
                                netdev->name, adapter->link_speed,
                                adapter->link_duplex == FULL_DUPLEX ?
                                "full duplex" : "half duplex");
                }
                if (!netif_carrier_ok(netdev)) {
                        /* Link down -> Up */
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                }
                return 0;
        }

        /* change original link status */
        if (netif_carrier_ok(netdev)) {
                adapter->link_speed = SPEED_0;
                netif_carrier_off(netdev);
                netif_stop_queue(netdev);
        }

        if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
            hw->media_type != MEDIA_TYPE_1000M_FULL) {
                switch (hw->media_type) {
                case MEDIA_TYPE_100M_FULL:
                        phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
                                   MII_CR_RESET;
                        break;
                case MEDIA_TYPE_100M_HALF:
                        phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
                        break;
                case MEDIA_TYPE_10M_FULL:
                        phy_data =
                            MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
                        break;
                default:
                        /* MEDIA_TYPE_10M_HALF: */
                        phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
                        break;
                }
                atl1_write_phy_reg(hw, MII_BMCR, phy_data);
                return 0;
        }

        /* auto-neg, insert timer to re-config phy */
        if (!adapter->phy_timer_pending) {
                adapter->phy_timer_pending = true;
                mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
        }

        return 0;
}

/*
 * atl1_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct atl1_adapter *adapter = netdev_priv(netdev);
        int old_mtu = netdev->mtu;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

        if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
                dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
                return -EINVAL;
        }

        adapter->hw.max_frame_size = max_frame;
        adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
        adapter->rx_buffer_len = (max_frame + 7) & ~7;
        adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;

        netdev->mtu = new_mtu;
        if ((old_mtu != new_mtu) && netif_running(netdev)) {
                atl1_down(adapter);
                atl1_up(adapter);
        }

        return 0;
}

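/*
 * set_flow_ctrl_old / set_flow_ctrl_new - program pause thresholds
 *
 * The RXF and RRD pause high/low water marks are set as fixed fractions
 * of the ring counts (older silicon revisions) or of the on-chip SRAM
 * lengths (newer revisions), following the vendor reference driver.
 */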
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
        u32 hi, lo, value;

        /* RFD Flow Control */
        value = adapter->rfd_ring.count;
        hi = value / 16;
        if (hi < 2)
                hi = 2;
        lo = value * 7 / 8;

        value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

        /* RRD Flow Control */
        value = adapter->rrd_ring.count;
        lo = value / 16;
        hi = value * 7 / 8;
        if (lo < 2)
                lo = 2;
        value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

static void set_flow_ctrl_new(struct atl1_hw *hw)
{
        u32 hi, lo, value;

        /* RXF Flow Control */
        value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
        lo = value / 16;
        if (lo < 192)
                lo = 192;
        hi = value * 7 / 8;
        if (hi < lo)
                hi = lo + 16;
        value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

        /* RRD Flow Control */
        value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
        lo = value / 8;
        hi = value * 7 / 8;
        if (lo < 2)
                lo = 2;
        if (hi < lo)
                hi = lo + 3;
        value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
                ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

/*
 * atl1_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx/Rx unit of the MAC after a reset.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
        struct atl1_hw *hw = &adapter->hw;
        u32 value;

        /* clear interrupt status */
        iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

        /* set MAC Address */
        value = (((u32) hw->mac_addr[2]) << 24) |
                (((u32) hw->mac_addr[3]) << 16) |
                (((u32) hw->mac_addr[4]) << 8) |
                (((u32) hw->mac_addr[5]));
        iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
        value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
        iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

        /* tx / rx ring */

        /* HI base address */
        iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
                hw->hw_addr + REG_DESC_BASE_ADDR_HI);
        /* LO base address */
        iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_RFD_ADDR_LO);
        iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_RRD_ADDR_LO);
        iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_TPD_ADDR_LO);
        iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_CMB_ADDR_LO);
        iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
                hw->hw_addr + REG_DESC_SMB_ADDR_LO);

        /* element count */
        value = adapter->rrd_ring.count;
        value <<= 16;
        value += adapter->rfd_ring.count;
        iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
        iowrite32(adapter->tpd_ring.count, hw->hw_addr +
                REG_DESC_TPD_RING_SIZE);

        /* Load Ptr */
        iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

        /* config Mailbox */
        value = ((atomic_read(&adapter->tpd_ring.next_to_use)
                  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
                ((atomic_read(&adapter->rrd_ring.next_to_clean)
                & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
                ((atomic_read(&adapter->rfd_ring.next_to_use)
                & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAILBOX);

        /* config IPG/IFG */
        value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
                 << MAC_IPG_IFG_IPGT_SHIFT) |
                (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
                << MAC_IPG_IFG_MIFG_SHIFT) |
                (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
                << MAC_IPG_IFG_IPGR1_SHIFT) |
                (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
                << MAC_IPG_IFG_IPGR2_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

        /* config Half-Duplex Control */
        value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
                (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
                << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
                MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
                (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
                (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
                << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
        iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

        /* set Interrupt Moderator Timer */
        iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
        iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

        /* set Interrupt Clear Timer */
        iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

        /* set max frame size hw will accept */
        iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

        /* jumbo size & rrd retirement timer */
        value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
                 << RXQ_JMBOSZ_TH_SHIFT) |
                (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
                << RXQ_JMBO_LKAH_SHIFT) |
                (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
                << RXQ_RRD_TIMER_SHIFT);
        iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

        /* Flow Control */
        switch (hw->dev_rev) {
        case 0x8001:
        case 0x9001:
        case 0x9002:
        case 0x9003:
                set_flow_ctrl_old(adapter);
                break;
        default:
                set_flow_ctrl_new(hw);
                break;
        }

        /* config TXQ */
        value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
                 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
                (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
                << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
                (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
                << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
                TXQ_CTRL_EN;
        iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

        /* min tpd fetch gap & tx jumbo packet size threshold for task offload */
        value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
                << TX_JUMBO_TASK_TH_SHIFT) |
                (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
                << TX_TPD_MIN_IPG_SHIFT);
        iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

        /* config RXQ */
        value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
                << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
                (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
                << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
                (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
                << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
                RXQ_CTRL_EN;
        iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

        /* config DMA Engine */
        value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
                << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
                ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
                << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
                DMA_CTRL_DMAW_EN;
        value |= (u32) hw->dma_ord;
        if (atl1_rcb_128 == hw->rcb_value)
                value |= DMA_CTRL_RCB_VALUE;
        iowrite32(value, hw->hw_addr + REG_DMA_CTRL);

        /* config CMB / SMB */
        value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
                hw->cmb_tpd : adapter->tpd_ring.count;
        value <<= 16;
        value |= hw->cmb_rrd;
        iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
        value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
        iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
        iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

        /* enable CMB / SMB */
        value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
        iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

        value = ioread32(adapter->hw.hw_addr + REG_ISR);
        if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
                value = 1;      /* config failed */
        else
                value = 0;

        /* clear all interrupt status */
        iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
        iowrite32(0, adapter->hw.hw_addr + REG_ISR);
        return value;
}

/*
 * atl1_pcie_patch - Patch for PCIE module
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
        u32 value;

        /* much vendor magic here */
        value = 0x6500;
        iowrite32(value, adapter->hw.hw_addr + 0x12FC);
        /* pcie flow control mode change */
        value = ioread32(adapter->hw.hw_addr + 0x1008);
        value |= 0x8000;
        iowrite32(value, adapter->hw.hw_addr + 0x1008);
}

/*
 * After ACPI resume on some VIA motherboards, the Interrupt Disable
 * bit (0x400) in the PCI Command register is set.  This function
 * clears that bit so legacy interrupts work again.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
        unsigned long value;

        /* PCI_COMMAND is a 16-bit register, so use 16-bit accessors */
        value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
        if (value & PCI_COMMAND_INTX_DISABLE)
                value &= ~PCI_COMMAND_INTX_DISABLE;
        iowrite16(value, adapter->hw.hw_addr + PCI_COMMAND);
}

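/*
 * atl1_inc_smb - accumulate the hardware statistics message block
 * @adapter: board private structure
 *
 * The hardware periodically DMAs its counters into the SMB; fold them
 * into soft_stats and mirror the standard fields into net_stats.
 */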
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
        struct stats_msg_block *smb = adapter->smb.smb;

        /* Fill out the OS statistics structure */
        adapter->soft_stats.rx_packets += smb->rx_ok;
        adapter->soft_stats.tx_packets += smb->tx_ok;
        adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
        adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
        adapter->soft_stats.multicast += smb->rx_mcast;
        adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
                smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

        /* Rx Errors */
        adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
                smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
                smb->rx_rrd_ov + smb->rx_align_err);
        adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
        adapter->soft_stats.rx_length_errors += smb->rx_len_err;
        adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
        adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
        adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
                smb->rx_rxf_ov);

        adapter->soft_stats.rx_pause += smb->rx_pause;
        adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
        adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

        /* Tx Errors */
        adapter->soft_stats.tx_errors += (smb->tx_late_col +
                smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
        adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
        adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
        adapter->soft_stats.tx_window_errors += smb->tx_late_col;

        adapter->soft_stats.excecol += smb->tx_abort_col;
        adapter->soft_stats.deffer += smb->tx_defer;
        adapter->soft_stats.scc += smb->tx_1_col;
        adapter->soft_stats.mcc += smb->tx_2_col;
        adapter->soft_stats.latecol += smb->tx_late_col;
        adapter->soft_stats.tx_underun += smb->tx_underrun;
        adapter->soft_stats.tx_trunc += smb->tx_trunc;
        adapter->soft_stats.tx_pause += smb->tx_pause;

        adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
        adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
        adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
        adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
        adapter->net_stats.multicast = adapter->soft_stats.multicast;
        adapter->net_stats.collisions = adapter->soft_stats.collisions;
        adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
        adapter->net_stats.rx_over_errors =
                adapter->soft_stats.rx_missed_errors;
        adapter->net_stats.rx_length_errors =
                adapter->soft_stats.rx_length_errors;
        adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
        adapter->net_stats.rx_frame_errors =
                adapter->soft_stats.rx_frame_errors;
        adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
        adapter->net_stats.rx_missed_errors =
                adapter->soft_stats.rx_missed_errors;
        adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
        adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
        adapter->net_stats.tx_aborted_errors =
                adapter->soft_stats.tx_aborted_errors;
        adapter->net_stats.tx_window_errors =
                adapter->soft_stats.tx_window_errors;
        adapter->net_stats.tx_carrier_errors =
                adapter->soft_stats.tx_carrier_errors;
}

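/*
 * atl1_update_mailbox - post ring indices to the hardware
 *
 * The MAILBOX register packs the TPD producer, RFD producer and RRD
 * consumer indices into one 32-bit write, so all three are sampled
 * under mb_lock to keep the hardware's view consistent.
 */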
static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
        unsigned long flags;
        u32 tpd_next_to_use;
        u32 rfd_next_to_use;
        u32 rrd_next_to_clean;
        u32 value;

        spin_lock_irqsave(&adapter->mb_lock, flags);

        tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
        rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
        rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

        value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
                MB_RFD_PROD_INDX_SHIFT) |
                ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
                MB_RRD_CONS_INDX_SHIFT) |
                ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
                MB_TPD_PROD_INDX_SHIFT);
        iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

        spin_unlock_irqrestore(&adapter->mb_lock, flags);
}

static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd, u16 offset)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

        while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
                rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
                if (++rfd_ring->next_to_clean == rfd_ring->count)
                        rfd_ring->next_to_clean = 0;
        }
}

static void atl1_update_rfd_index(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd)
{
        u16 num_buf;

        num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
                adapter->rx_buffer_len;
        if (rrd->num_buf == num_buf)
                /* clean alloc flag for bad rrd */
                atl1_clean_alloc_flag(adapter, rrd, num_buf);
}

static void atl1_rx_checksum(struct atl1_adapter *adapter,
        struct rx_return_desc *rrd, struct sk_buff *skb)
{
        struct pci_dev *pdev = adapter->pdev;

        skb->ip_summed = CHECKSUM_NONE;

        if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
                                        ERR_FLAG_CODE | ERR_FLAG_OV)) {
                        adapter->hw_csum_err++;
                        dev_printk(KERN_DEBUG, &pdev->dev,
                                "rx checksum error\n");
                        return;
                }
        }

        /* not IPv4 */
        if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
                /* checksum is invalid, but it's not an IPv4 pkt, so ok */
                return;

        /* IPv4 packet */
        if (likely(!(rrd->err_flg &
                (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                adapter->hw_csum_good++;
                return;
        }

        /* IPv4, but hardware thinks its checksum is wrong */
        dev_printk(KERN_DEBUG, &pdev->dev,
                "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
                rrd->pkt_flg, rrd->err_flg);
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
        adapter->hw_csum_err++;
        return;
}

/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 *
 * Returns the number of receive buffers that were replenished.
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct pci_dev *pdev = adapter->pdev;
        struct page *page;
        unsigned long offset;
        struct atl1_buffer *buffer_info, *next_info;
        struct sk_buff *skb;
        u16 num_alloc = 0;
        u16 rfd_next_to_use, next_next;
        struct rx_free_desc *rfd_desc;

        next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
        if (++next_next == rfd_ring->count)
                next_next = 0;
        buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
        next_info = &rfd_ring->buffer_info[next_next];

        while (!buffer_info->alloced && !next_info->alloced) {
                if (buffer_info->skb) {
                        buffer_info->alloced = 1;
                        goto next;
                }

                rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

                skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->net_stats.rx_dropped++;
                        break;
                }

                /*
                 * Make buffer alignment 2 beyond a 16 byte boundary
                 * this will result in a 16 byte aligned IP header after
                 * the 14 byte MAC header is removed
                 */
                skb_reserve(skb, NET_IP_ALIGN);

                buffer_info->alloced = 1;
                buffer_info->skb = skb;
                buffer_info->length = (u16) adapter->rx_buffer_len;
                page = virt_to_page(skb->data);
                offset = (unsigned long)skb->data & ~PAGE_MASK;
                buffer_info->dma = pci_map_page(pdev, page, offset,
                                                adapter->rx_buffer_len,
                                                PCI_DMA_FROMDEVICE);
                rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
                rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
                rfd_desc->coalese = 0;

next:
                rfd_next_to_use = next_next;
                if (unlikely(++next_next == rfd_ring->count))
                        next_next = 0;

                buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
                next_info = &rfd_ring->buffer_info[next_next];
                num_alloc++;
        }

        if (num_alloc) {
                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
        }
        return num_alloc;
}

static void atl1_intr_rx(struct atl1_adapter *adapter)
{
        int i, count;
        u16 length;
        u16 rrd_next_to_clean;
        u32 value;
        struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
        struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
        struct atl1_buffer *buffer_info;
        struct rx_return_desc *rrd;
        struct sk_buff *skb;

        count = 0;

        rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

        while (1) {
                rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
                i = 1;
                if (likely(rrd->xsz.valid)) {   /* packet valid */
chk_rrd:
                        /* check rrd status */
                        if (likely(rrd->num_buf == 1))
                                goto rrd_ok;

                        /* rrd seems to be bad */
                        if (unlikely(i-- > 0)) {
                                /* rrd may not be DMAed completely */
                                dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                        "incomplete RRD DMA transfer\n");
                                udelay(1);
                                goto chk_rrd;
                        }
                        /* bad rrd */
                        dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                "bad RRD\n");
                        /* see if update RFD index */
                        if (rrd->num_buf > 1)
                                atl1_update_rfd_index(adapter, rrd);

                        /* update rrd */
                        rrd->xsz.valid = 0;
                        if (++rrd_next_to_clean == rrd_ring->count)
                                rrd_next_to_clean = 0;
                        count++;
                        continue;
                } else {        /* current rrd not yet updated by hardware */

                        break;
                }
rrd_ok:
                /* clean alloc flag for bad rrd */
                atl1_clean_alloc_flag(adapter, rrd, 0);

                buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
                if (++rfd_ring->next_to_clean == rfd_ring->count)
                        rfd_ring->next_to_clean = 0;

                /* update rrd next to clean */
                if (++rrd_next_to_clean == rrd_ring->count)
                        rrd_next_to_clean = 0;
                count++;

                if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
                        if (!(rrd->err_flg &
                                (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
                                | ERR_FLAG_LEN))) {
                                /* packet error, don't need upstream */
                                buffer_info->alloced = 0;
                                rrd->xsz.valid = 0;
                                continue;
                        }
                }

                /* Good Receive */
                pci_unmap_page(adapter->pdev, buffer_info->dma,
                               buffer_info->length, PCI_DMA_FROMDEVICE);
                skb = buffer_info->skb;
                length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

                skb_put(skb, length - ETH_FCS_LEN);

                /* Receive Checksum Offload */
                atl1_rx_checksum(adapter, rrd, skb);
                skb->protocol = eth_type_trans(skb, adapter->netdev);

                if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
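                        /*
                         * The RRD carries the 802.1Q tag with the VID in
                         * its upper 12 bits and priority/CFI in the low
                         * nibble; rebuild the standard PCP/CFI/VID layout
                         * before handing the frame to the VLAN layer.
                         */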
                        u16 vlan_tag = (rrd->vlan_tag >> 4) |
                                        ((rrd->vlan_tag & 7) << 13) |
                                        ((rrd->vlan_tag & 8) << 9);
                        vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
                } else
                        netif_rx(skb);

                /* let protocol layer free skb */
                buffer_info->skb = NULL;
                buffer_info->alloced = 0;
                rrd->xsz.valid = 0;

                adapter->netdev->last_rx = jiffies;
        }

        atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

        atl1_alloc_rx_buffers(adapter);

        /* update mailbox ? */
        if (count) {
                u32 tpd_next_to_use;
                u32 rfd_next_to_use;

                spin_lock(&adapter->mb_lock);

                tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
                rfd_next_to_use =
                    atomic_read(&adapter->rfd_ring.next_to_use);
                rrd_next_to_clean =
                    atomic_read(&adapter->rrd_ring.next_to_clean);
                value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
                        MB_RFD_PROD_INDX_SHIFT) |
                        ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
                        MB_RRD_CONS_INDX_SHIFT) |
                        ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
                        MB_TPD_PROD_INDX_SHIFT);
                iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
                spin_unlock(&adapter->mb_lock);
        }
}

static void atl1_intr_tx(struct atl1_adapter *adapter)
{
        struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
        struct atl1_buffer *buffer_info;
        u16 sw_tpd_next_to_clean;
        u16 cmb_tpd_next_to_clean;

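        /*
         * The hardware DMAs its current TPD consumer index into the
         * CMB, so completed transmit descriptors can be reaped without
         * an MMIO register read.
         */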
        sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
        cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

        while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
                struct tx_packet_desc *tpd;

                tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
                buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
                if (buffer_info->dma) {
                        pci_unmap_page(adapter->pdev, buffer_info->dma,
                                       buffer_info->length, PCI_DMA_TODEVICE);
                        buffer_info->dma = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb_irq(buffer_info->skb);
                        buffer_info->skb = NULL;
                }
                tpd->buffer_addr = 0;
                tpd->desc.data = 0;

                if (++sw_tpd_next_to_clean == tpd_ring->count)
                        sw_tpd_next_to_clean = 0;
        }
        atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

        if (netif_queue_stopped(adapter->netdev)
            && netif_carrier_ok(adapter->netdev))
                netif_wake_queue(adapter->netdev);
}

static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
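        /*
         * Number of free TPD slots.  One slot is always left unused so
         * that a full ring is distinguishable from an empty one.
         */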
        u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
        u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
        return ((next_to_clean > next_to_use) ?
                next_to_clean - next_to_use - 1 :
                tpd_ring->count + next_to_clean - next_to_use - 1);
}

static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
                         struct tso_param *tso)
{
        /* We enter this function holding a spinlock. */
        u8 ipofst;
        int err;

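        /*
         * A nonzero gso_size means the stack wants this frame segmented
         * in hardware: fix up the IP/TCP headers for the pseudo-header
         * checksum and encode the header lengths and MSS into the TSO
         * parameter word.  Returns true when TSO is used.
         */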
        if (skb_shinfo(skb)->gso_size) {
                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (unlikely(err))
                                return err;
                }

                if (skb->protocol == ntohs(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);

                        iph->tot_len = 0;
                        iph->check = 0;
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                iph->daddr, 0, IPPROTO_TCP, 0);
                        ipofst = skb_network_offset(skb);
                        if (ipofst != ETH_HLEN) /* 802.3 frame */
                                tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;

                        tso->tsopl |= (iph->ihl &
                                TSO_PARAM_IPHL_MASK) << TSO_PARAM_IPHL_SHIFT;
                        tso->tsopl |= ((tcp_hdrlen(skb) >> 2) &
                                TSO_PARAM_TCPHDRLEN_MASK) <<
                                TSO_PARAM_TCPHDRLEN_SHIFT;
                        tso->tsopl |= (skb_shinfo(skb)->gso_size &
                                TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
                        tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
                        tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
                        tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
                        return true;
                }
        }
        return false;
}

static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
        struct csum_param *csum)
{
        u8 css, cso;

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                cso = skb_transport_offset(skb);
                css = cso + skb->csum_offset;
                if (unlikely(cso & 0x1)) {
                        dev_printk(KERN_DEBUG, &adapter->pdev->dev,
                                "payload offset not an even number\n");
                        return -1;
                }
                csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
                        CSUM_PARAM_PLOADOFFSET_SHIFT;
                csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
                        CSUM_PARAM_XSUMOFFSET_SHIFT;
                csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
                return true;
        }

        return true;
}

1349 static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
1350         bool tcp_seg)
1351 {
1352         /* We enter this function holding a spinlock. */
1353         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1354         struct atl1_buffer *buffer_info;
1355         struct page *page;
1356         int first_buf_len = skb->len;
1357         unsigned long offset;
1358         unsigned int nr_frags;
1359         unsigned int f;
1360         u16 tpd_next_to_use;
1361         u16 proto_hdr_len;
1362         u16 len12;
1363
1364         first_buf_len -= skb->data_len;
1365         nr_frags = skb_shinfo(skb)->nr_frags;
1366         tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
1367         buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1368         if (unlikely(buffer_info->skb))
1369                 BUG();
1370         /* skb is stored only in the last TPD's buffer_info (see below) */
1371         buffer_info->skb = NULL;
1372
1373         if (tcp_seg) {
1374                 /* TSO/GSO */
1375                 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1376                 buffer_info->length = proto_hdr_len;
1377                 page = virt_to_page(skb->data);
1378                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1379                 buffer_info->dma = pci_map_page(adapter->pdev, page,
1380                                                 offset, proto_hdr_len,
1381                                                 PCI_DMA_TODEVICE);
1382
1383                 if (++tpd_next_to_use == tpd_ring->count)
1384                         tpd_next_to_use = 0;
1385
1386                 if (first_buf_len > proto_hdr_len) {
1387                         int i, m;
1388
1389                         len12 = first_buf_len - proto_hdr_len;
1390                         m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) /
1391                                 ATL1_MAX_TX_BUF_LEN;
1392                         for (i = 0; i < m; i++) {
1393                                 buffer_info =
1394                                     &tpd_ring->buffer_info[tpd_next_to_use];
1395                                 buffer_info->skb = NULL;
1396                                 buffer_info->length =
1397                                     (len12 > ATL1_MAX_TX_BUF_LEN) ?
1398                                     ATL1_MAX_TX_BUF_LEN : len12;
1399                                 len12 -= buffer_info->length;
1400                                 page = virt_to_page(skb->data +
1401                                         (proto_hdr_len +
1402                                         i * ATL1_MAX_TX_BUF_LEN));
1403                                 offset = (unsigned long)(skb->data +
1404                                         (proto_hdr_len +
1405                                         i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK;
1406                                 buffer_info->dma = pci_map_page(adapter->pdev,
1407                                         page, offset, buffer_info->length,
1408                                         PCI_DMA_TODEVICE);
1409                                 if (++tpd_next_to_use == tpd_ring->count)
1410                                         tpd_next_to_use = 0;
1411                         }
1412                 }
1413         } else {
1414                 /* not TSO/GSO */
1415                 buffer_info->length = first_buf_len;
1416                 page = virt_to_page(skb->data);
1417                 offset = (unsigned long)skb->data & ~PAGE_MASK;
1418                 buffer_info->dma = pci_map_page(adapter->pdev, page,
1419                         offset, first_buf_len, PCI_DMA_TODEVICE);
1420                 if (++tpd_next_to_use == tpd_ring->count)
1421                         tpd_next_to_use = 0;
1422         }
1423
1424         for (f = 0; f < nr_frags; f++) {
1425                 struct skb_frag_struct *frag;
1426                 u16 lenf, i, m;
1427
1428                 frag = &skb_shinfo(skb)->frags[f];
1429                 lenf = frag->size;
1430
1431                 m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
1432                 for (i = 0; i < m; i++) {
1433                         buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1434                         if (unlikely(buffer_info->skb))
1435                                 BUG();
1436                         buffer_info->skb = NULL;
1437                         buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ?
1438                                 ATL1_MAX_TX_BUF_LEN : lenf;
1439                         lenf -= buffer_info->length;
1440                         buffer_info->dma = pci_map_page(adapter->pdev,
1441                                 frag->page,
1442                                 frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
1443                                 buffer_info->length, PCI_DMA_TODEVICE);
1444
1445                         if (++tpd_next_to_use == tpd_ring->count)
1446                                 tpd_next_to_use = 0;
1447                 }
1448         }
1449
1450         /* last tpd's buffer-info */
1451         buffer_info->skb = skb;
1452 }
1453
1454 static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
1455        union tpd_descr *descr)
1456 {
1457         /* We enter this function holding a spinlock. */
1458         struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1459         int j;
1460         u32 val;
1461         struct atl1_buffer *buffer_info;
1462         struct tx_packet_desc *tpd;
1463         u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
1464
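             /*
              * Copy the prepared checksum/TSO parameter words into each TPD
              * in the chain; the first descriptor of a segmented packet is
              * tagged as the header TPD and the last one gets the
              * end-of-packet bit.
              */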
1465         for (j = 0; j < count; j++) {
1466                 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1467                 tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
1468                 tpd->desc.csum.csumpu = descr->csum.csumpu;
1469                 tpd->desc.csum.csumpl = descr->csum.csumpl;
1470                 tpd->desc.tso.tsopu = descr->tso.tsopu;
1471                 tpd->desc.tso.tsopl = descr->tso.tsopl;
1472                 tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1473                 tpd->desc.data = descr->data;
1474                 tpd->desc.tso.tsopu |= (cpu_to_le16(buffer_info->length) &
1475                         TSO_PARAM_BUFLEN_MASK) << TSO_PARAM_BUFLEN_SHIFT;
1476
1477                 val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
1478                         TSO_PARAM_SEGMENT_MASK;
1479                 if (val && !j)
1480                         tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;
1481
1482                 if (j == (count - 1))
1483                         tpd->desc.tso.tsopl |= 1 << TSO_PARAM_EOP_SHIFT;
1484
1485                 if (++tpd_next_to_use == tpd_ring->count)
1486                         tpd_next_to_use = 0;
1487         }
1488         /*
1489          * Force memory writes to complete before letting h/w
1490          * know there are new descriptors to fetch.  (Only
1491          * applicable for weak-ordered memory model archs,
1492          * such as IA-64).
1493          */
1494         wmb();
1495
1496         atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
1497 }
1498
1499 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1500 {
1501         struct atl1_adapter *adapter = netdev_priv(netdev);
1502         int len = skb->len;
1503         int tso;
1504         int count = 1;
1505         int ret_val;
1506         u32 val;
1507         union tpd_descr param;
1508         u16 frag_size;
1509         u16 vlan_tag;
1510         unsigned long flags;
1511         unsigned int nr_frags = 0;
1512         unsigned int mss = 0;
1513         unsigned int f;
1514         unsigned int proto_hdr_len;
1515
1516         len -= skb->data_len;
1517
1518         if (unlikely(skb->len == 0)) {
1519                 dev_kfree_skb_any(skb);
1520                 return NETDEV_TX_OK;
1521         }
1522
1523         param.data = 0;
1524         param.tso.tsopu = 0;
1525         param.tso.tsopl = 0;
1526         param.csum.csumpu = 0;
1527         param.csum.csumpl = 0;
1528
1529         /* nr_frags will be nonzero if we're doing scatter/gather (SG) */
1530         nr_frags = skb_shinfo(skb)->nr_frags;
1531         for (f = 0; f < nr_frags; f++) {
1532                 frag_size = skb_shinfo(skb)->frags[f].size;
1533                 if (frag_size)
1534                         count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
1535                                 ATL1_MAX_TX_BUF_LEN;
1536         }
1537
1538         /* mss will be nonzero if we're doing segment offload (TSO/GSO) */
1539         mss = skb_shinfo(skb)->gso_size;
1540         if (mss) {
1541                 if (skb->protocol == htons(ETH_P_IP)) {
1542                         proto_hdr_len = (skb_transport_offset(skb) +
1543                                          tcp_hdrlen(skb));
1544                         if (unlikely(proto_hdr_len > len)) {
1545                                 dev_kfree_skb_any(skb);
1546                                 return NETDEV_TX_OK;
1547                         }
1548                         /* do we need an additional TPD? */
1549                         if (proto_hdr_len != len)
1550                                 count += (len - proto_hdr_len +
1551                                         ATL1_MAX_TX_BUF_LEN - 1) /
1552                                         ATL1_MAX_TX_BUF_LEN;
1553                 }
1554         }
1555
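             /*
              * This is an LLTX driver (NETIF_F_LLTX), so it takes its own
              * lock here; if the lock is contended, NETDEV_TX_LOCKED tells
              * the core to requeue the packet rather than spin.
              */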
1556         if (!spin_trylock_irqsave(&adapter->lock, flags)) {
1557                 /* Can't get lock - tell upper layer to requeue */
1558                 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
1559                 return NETDEV_TX_LOCKED;
1560         }
1561
1562         if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
1563                 /* not enough descriptors */
1564                 netif_stop_queue(netdev);
1565                 spin_unlock_irqrestore(&adapter->lock, flags);
1566                 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
1567                 return NETDEV_TX_BUSY;
1568         }
1569
1572         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1573                 vlan_tag = vlan_tx_tag_get(skb);
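                     /*
                      * Rearrange the 802.1Q TCI (prio:3, CFI:1, VID:12) into
                      * the layout the TPD expects: VID in bits 15:4, CFI at
                      * bit 3, priority in bits 2:0.  This follows the
                      * reference driver; the descriptor format is not
                      * otherwise documented here.
                      */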
1574                 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
1575                         ((vlan_tag >> 9) & 0x8);
1576                 param.tso.tsopl |= 1 << TSO_PARAM_INSVLAG_SHIFT;
1577                 param.tso.tsopu |= (vlan_tag & TSO_PARAM_VLANTAG_MASK) <<
1578                         TSO_PARAM_VLAN_SHIFT;
1579         }
1580
1581         tso = atl1_tso(adapter, skb, &param.tso);
1582         if (tso < 0) {
1583                 spin_unlock_irqrestore(&adapter->lock, flags);
1584                 dev_kfree_skb_any(skb);
1585                 return NETDEV_TX_OK;
1586         }
1587
1588         if (!tso) {
1589                 ret_val = atl1_tx_csum(adapter, skb, &param.csum);
1590                 if (ret_val < 0) {
1591                         spin_unlock_irqrestore(&adapter->lock, flags);
1592                         dev_kfree_skb_any(skb);
1593                         return NETDEV_TX_OK;
1594                 }
1595         }
1596
1597         val = (param.tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
1598                 TSO_PARAM_SEGMENT_MASK;
1599         atl1_tx_map(adapter, skb, 1 == val);
1600         atl1_tx_queue(adapter, count, &param);
1601         netdev->trans_start = jiffies;
1602         spin_unlock_irqrestore(&adapter->lock, flags);
1603         atl1_update_mailbox(adapter);
1604         return NETDEV_TX_OK;
1605 }
1606
1607 /*
1608  * atl1_intr - Interrupt Handler
1609  * @irq: interrupt number
1610  * @data: pointer to a network interface device structure
1612  */
1613 static irqreturn_t atl1_intr(int irq, void *data)
1614 {
1615         struct atl1_adapter *adapter = netdev_priv(data);
1616         u32 status;
1618         int max_ints = 10;
1619
1620         status = adapter->cmb.cmb->int_stats;
1621         if (!status)
1622                 return IRQ_NONE;
1625
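             /*
              * Interrupt status is mirrored in the CMB in host memory; keep
              * servicing events while new status appears there, bounded by
              * max_ints so an interrupt storm cannot pin us in the handler.
              */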
1626         do {
1627                 /* clear CMB interrupt status at once */
1628                 adapter->cmb.cmb->int_stats = 0;
1629
1630                 if (status & ISR_GPHY)  /* clear phy status */
1631                         atlx_clear_phy_int(adapter);
1632
1633                 /* clear ISR status, enable CMB DMA and disable interrupts */
1634                 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
1635
1636                 /* check if SMB intr */
1637                 if (status & ISR_SMB)
1638                         atl1_inc_smb(adapter);
1639
1640                 /* check if PCIE PHY Link down */
1641                 if (status & ISR_PHY_LINKDOWN) {
1642                         dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1643                                 "pcie phy link down %x\n", status);
1644                         if (netif_running(adapter->netdev)) {   /* reset MAC */
1645                                 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1646                                 schedule_work(&adapter->pcie_dma_to_rst_task);
1647                                 return IRQ_HANDLED;
1648                         }
1649                 }
1650
1651                 /* check for a DMA read/write error */
1652                 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
1653                         dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1654                                 "pcie DMA r/w error (status = 0x%x)\n",
1655                                 status);
1656                         iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1657                         schedule_work(&adapter->pcie_dma_to_rst_task);
1658                         return IRQ_HANDLED;
1659                 }
1660
1661                 /* link event */
1662                 if (status & ISR_GPHY) {
1663                         adapter->soft_stats.tx_carrier_errors++;
1664                         atl1_check_for_link(adapter);
1665                 }
1666
1667                 /* transmit event */
1668                 if (status & ISR_CMB_TX)
1669                         atl1_intr_tx(adapter);
1670
1671                 /* rx exception */
1672                 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
1673                         ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
1674                         ISR_HOST_RRD_OV | ISR_CMB_RX))) {
1675                         if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
1676                                 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
1677                                 ISR_HOST_RRD_OV))
1678                                 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1679                                         "rx exception, ISR = 0x%x\n", status);
1680                         atl1_intr_rx(adapter);
1681                 }
1682
1683                 if (--max_ints < 0)
1684                         break;
1685
1686         } while ((status = adapter->cmb.cmb->int_stats));
1687
1688         /* re-enable Interrupt */
1689         iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
1690         return IRQ_HANDLED;
1691 }
1692
1693 /*
1694  * atl1_watchdog - Timer Call-back
1695  * @data: pointer to netdev cast into an unsigned long
1696  */
1697 static void atl1_watchdog(unsigned long data)
1698 {
1699         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1700
1701         /* Reset the timer */
1702         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1703 }
1704
1705 /*
1706  * atl1_phy_config - Timer Call-back
1707  * @data: pointer to netdev cast into an unsigned long
1708  */
1709 static void atl1_phy_config(unsigned long data)
1710 {
1711         struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1712         struct atl1_hw *hw = &adapter->hw;
1713         unsigned long flags;
1714
1715         spin_lock_irqsave(&adapter->lock, flags);
1716         adapter->phy_timer_pending = false;
1717         atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1718         atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
1719         atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
1720         spin_unlock_irqrestore(&adapter->lock, flags);
1721 }
1722
1723 /*
1724  * Orphaned vendor comment left intact here:
1725  * <vendor comment>
1726  * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
1727  * will assert. We do soft reset <0x1400=1> according
1728  * with the SPEC. BUT, it seemes that PCIE or DMA
1729  * state-machine will not be reset. DMAR_TO_INT will
1730  * assert again and again.
1731  * </vendor comment>
1732  */
1733 static void atl1_tx_timeout_task(struct work_struct *work)
1734 {
1735         struct atl1_adapter *adapter =
1736                 container_of(work, struct atl1_adapter, tx_timeout_task);
1737         struct net_device *netdev = adapter->netdev;
1738
1739         netif_device_detach(netdev);
1740         atl1_down(adapter);
1741         atl1_up(adapter);
1742         netif_device_attach(netdev);
1743 }
1744
1745 int atl1_reset(struct atl1_adapter *adapter)
1746 {
1747         int ret;
1748         ret = atl1_reset_hw(&adapter->hw);
1749         if (ret)
1750                 return ret;
1751         return atl1_init_hw(&adapter->hw);
1752 }
1753
1754 s32 atl1_up(struct atl1_adapter *adapter)
1755 {
1756         struct net_device *netdev = adapter->netdev;
1757         int err;
1758         int irq_flags = IRQF_SAMPLE_RANDOM;
1759
1760         /* hardware has been reset, we need to reload some things */
1761         atlx_set_multi(netdev);
1762         atl1_init_ring_ptrs(adapter);
1763         atlx_restore_vlan(adapter);
1764         err = atl1_alloc_rx_buffers(adapter);
1765         if (unlikely(!err))
1766                 /* atl1_alloc_rx_buffers() returns the number allocated */
1767                 return -ENOMEM;
1768
1769         if (unlikely(atl1_configure(adapter))) {
1770                 err = -EIO;
1771                 goto err_up;
1772         }
1773
1774         err = pci_enable_msi(adapter->pdev);
1775         if (err) {
1776                 dev_info(&adapter->pdev->dev,
1777                         "Unable to enable MSI: %d\n", err);
1778                 irq_flags |= IRQF_SHARED;
1779         }
1780
1781         err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
1782                         netdev->name, netdev);
1783         if (unlikely(err))
1784                 goto err_up;
1785
1786         mod_timer(&adapter->watchdog_timer, jiffies);
1787         atlx_irq_enable(adapter);
1788         atl1_check_link(adapter);
1789         return 0;
1790
1791 err_up:
1792         pci_disable_msi(adapter->pdev);
1793         /* free rx_buffers */
1794         atl1_clean_rx_ring(adapter);
1795         return err;
1796 }
1797
1798 void atl1_down(struct atl1_adapter *adapter)
1799 {
1800         struct net_device *netdev = adapter->netdev;
1801
1802         del_timer_sync(&adapter->watchdog_timer);
1803         del_timer_sync(&adapter->phy_config_timer);
1804         adapter->phy_timer_pending = false;
1805
1806         atlx_irq_disable(adapter);
1807         free_irq(adapter->pdev->irq, netdev);
1808         pci_disable_msi(adapter->pdev);
1809         atl1_reset_hw(&adapter->hw);
1810         adapter->cmb.cmb->int_stats = 0;
1811
1812         adapter->link_speed = SPEED_0;
1813         adapter->link_duplex = -1;
1814         netif_carrier_off(netdev);
1815         netif_stop_queue(netdev);
1816
1817         atl1_clean_tx_ring(adapter);
1818         atl1_clean_rx_ring(adapter);
1819 }
1820
1821 /*
1822  * atl1_open - Called when a network interface is made active
1823  * @netdev: network interface device structure
1824  *
1825  * Returns 0 on success, negative value on failure
1826  *
1827  * The open entry point is called when a network interface is made
1828  * active by the system (IFF_UP).  At this point all resources needed
1829  * for transmit and receive operations are allocated, the interrupt
1830  * handler is registered with the OS, the watchdog timer is started,
1831  * and the stack is notified that the interface is ready.
1832  */
1833 static int atl1_open(struct net_device *netdev)
1834 {
1835         struct atl1_adapter *adapter = netdev_priv(netdev);
1836         int err;
1837
1838         /* allocate transmit descriptors */
1839         err = atl1_setup_ring_resources(adapter);
1840         if (err)
1841                 return err;
1842
1843         err = atl1_up(adapter);
1844         if (err)
1845                 goto err_up;
1846
1847         return 0;
1848
1849 err_up:
1850         atl1_reset(adapter);
1851         return err;
1852 }
1853
1854 /*
1855  * atl1_close - Disables a network interface
1856  * @netdev: network interface device structure
1857  *
1858  * Returns 0, this is not allowed to fail
1859  *
1860  * The close entry point is called when an interface is de-activated
1861  * by the OS.  The hardware is still under the drivers control, but
1862  * needs to be disabled.  A global MAC reset is issued to stop the
1863  * hardware, and all transmit and receive resources are freed.
1864  */
1865 static int atl1_close(struct net_device *netdev)
1866 {
1867         struct atl1_adapter *adapter = netdev_priv(netdev);
1868         atl1_down(adapter);
1869         atl1_free_ring_resources(adapter);
1870         return 0;
1871 }
1872
1873 #ifdef CONFIG_PM
1874 static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
1875 {
1876         struct net_device *netdev = pci_get_drvdata(pdev);
1877         struct atl1_adapter *adapter = netdev_priv(netdev);
1878         struct atl1_hw *hw = &adapter->hw;
1879         u32 ctrl = 0;
1880         u32 wufc = adapter->wol;
1881
1882         netif_device_detach(netdev);
1883         if (netif_running(netdev))
1884                 atl1_down(adapter);
1885
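             /* MII_BMSR latches link-failure events; read it twice so the
              * second read reflects the current link state */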
1886         atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1887         atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1888         if (ctrl & BMSR_LSTATUS)
1889                 wufc &= ~ATLX_WUFC_LNKC;
1890
1891         /* reduce speed to 10/100M */
1892         if (wufc) {
1893                 atl1_phy_enter_power_saving(hw);
1894                 /* force the driver to re-establish the link on resume */
1895                 hw->phy_configured = false;
1896                 atl1_set_mac_addr(hw);
1897                 atlx_set_multi(netdev);
1898
1899                 ctrl = 0;
1900                 /* turn on magic packet wol */
1901                 if (wufc & ATLX_WUFC_MAG)
1902                         ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
1903
1904                 /* turn on Link change WOL */
1905                 if (wufc & ATLX_WUFC_LNKC)
1906                         ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
1907                 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
1908
1909                 /* turn on all-multi mode if wake on multicast is enabled */
1910                 ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
1911                 ctrl &= ~MAC_CTRL_DBG;
1912                 ctrl &= ~MAC_CTRL_PROMIS_EN;
1913                 if (wufc & ATLX_WUFC_MC)
1914                         ctrl |= MAC_CTRL_MC_ALL_EN;
1915                 else
1916                         ctrl &= ~MAC_CTRL_MC_ALL_EN;
1917
1918                 /* turn on broadcast mode if wake-on-broadcast is enabled */
1919                 if (wufc & ATLX_WUFC_BC)
1920                         ctrl |= MAC_CTRL_BC_EN;
1921                 else
1922                         ctrl &= ~MAC_CTRL_BC_EN;
1923
1924                 /* enable RX */
1925                 ctrl |= MAC_CTRL_RX_EN;
1926                 iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
1927                 pci_enable_wake(pdev, PCI_D3hot, 1);
1928                 pci_enable_wake(pdev, PCI_D3cold, 1);
1929         } else {
1930                 iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
1931                 pci_enable_wake(pdev, PCI_D3hot, 0);
1932                 pci_enable_wake(pdev, PCI_D3cold, 0);
1933         }
1934
1935         pci_save_state(pdev);
1936         pci_disable_device(pdev);
1937
1938         pci_set_power_state(pdev, PCI_D3hot);
1939
1940         return 0;
1941 }
1942
1943 static int atl1_resume(struct pci_dev *pdev)
1944 {
1945         struct net_device *netdev = pci_get_drvdata(pdev);
1946         struct atl1_adapter *adapter = netdev_priv(netdev);
1947         int err;
1948
1949         pci_set_power_state(pdev, PCI_D0);
1950         pci_restore_state(pdev);
1951
1952         err = pci_enable_device(pdev);
             /* handle the failure the old FIXME here asked about */
             if (err) {
                     dev_err(&pdev->dev, "cannot enable PCI device: %d\n", err);
                     return err;
             }
1954         pci_enable_wake(pdev, PCI_D3hot, 0);
1955         pci_enable_wake(pdev, PCI_D3cold, 0);
1956
1957         iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
1958         atl1_reset(adapter);
1959
1960         if (netif_running(netdev))
1961                 atl1_up(adapter);
1962         netif_device_attach(netdev);
1963
1964         atl1_via_workaround(adapter);
1965
1966         return 0;
1967 }
1968 #else
1969 #define atl1_suspend NULL
1970 #define atl1_resume NULL
1971 #endif
1972
1973 #ifdef CONFIG_NET_POLL_CONTROLLER
1974 static void atl1_poll_controller(struct net_device *netdev)
1975 {
1976         disable_irq(netdev->irq);
1977         atl1_intr(netdev->irq, netdev);
1978         enable_irq(netdev->irq);
1979 }
1980 #endif
1981
1982 /*
1983  * atl1_probe - Device Initialization Routine
1984  * @pdev: PCI device information struct
1985  * @ent: entry in atl1_pci_tbl
1986  *
1987  * Returns 0 on success, negative on failure
1988  *
1989  * atl1_probe initializes an adapter identified by a pci_dev structure.
1990  * The OS initialization, configuring of the adapter private structure,
1991  * and a hardware reset occur.
1992  */
1993 static int __devinit atl1_probe(struct pci_dev *pdev,
1994         const struct pci_device_id *ent)
1995 {
1996         struct net_device *netdev;
1997         struct atl1_adapter *adapter;
1998         static int cards_found;
1999         int err;
2000
2001         err = pci_enable_device(pdev);
2002         if (err)
2003                 return err;
2004
2005         /*
2006          * The atl1 chip can DMA to 64-bit addresses, but it uses a single
2007          * shared register for the high 32 bits, so only a single, aligned,
2008          * 4 GB physical address range can be used at a time.
2009          *
2010          * Supporting 64-bit DMA on this hardware is more trouble than it's
2011          * worth.  It is far easier to limit to 32-bit DMA than update
2012          * various kernel subsystems to support the mechanics required by a
2013          * fixed-high-32-bit system.
2014          */
2015         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2016         if (err) {
2017                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2018                 goto err_dma;
2019         }
2020         /*
2021          * Mark all PCI regions associated with PCI device
2022          * pdev as being reserved by owner atl1_driver_name
2023          */
2024         err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
2025         if (err)
2026                 goto err_request_regions;
2027
2028         /*
2029          * Enables bus-mastering on the device and calls
2030          * pcibios_set_master to do the needed arch specific settings
2031          */
2032         pci_set_master(pdev);
2033
2034         netdev = alloc_etherdev(sizeof(struct atl1_adapter));
2035         if (!netdev) {
2036                 err = -ENOMEM;
2037                 goto err_alloc_etherdev;
2038         }
2039         SET_NETDEV_DEV(netdev, &pdev->dev);
2040
2041         pci_set_drvdata(pdev, netdev);
2042         adapter = netdev_priv(netdev);
2043         adapter->netdev = netdev;
2044         adapter->pdev = pdev;
2045         adapter->hw.back = adapter;
2046
2047         adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
2048         if (!adapter->hw.hw_addr) {
2049                 err = -EIO;
2050                 goto err_pci_iomap;
2051         }
2052         /* get device revision number */
2053         adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
2054                 (REG_MASTER_CTRL + 2));
2055         dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
2056
2057         /* set default ring resource counts */
2058         adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
2059         adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
2060
2061         adapter->mii.dev = netdev;
2062         adapter->mii.mdio_read = mdio_read;
2063         adapter->mii.mdio_write = mdio_write;
2064         adapter->mii.phy_id_mask = 0x1f;
2065         adapter->mii.reg_num_mask = 0x1f;
2066
2067         netdev->open = &atl1_open;
2068         netdev->stop = &atl1_close;
2069         netdev->hard_start_xmit = &atl1_xmit_frame;
2070         netdev->get_stats = &atlx_get_stats;
2071         netdev->set_multicast_list = &atlx_set_multi;
2072         netdev->set_mac_address = &atl1_set_mac;
2073         netdev->change_mtu = &atl1_change_mtu;
2074         netdev->do_ioctl = &atlx_ioctl;
2075         netdev->tx_timeout = &atlx_tx_timeout;
2076         netdev->watchdog_timeo = 5 * HZ;
2077 #ifdef CONFIG_NET_POLL_CONTROLLER
2078         netdev->poll_controller = atl1_poll_controller;
2079 #endif
2080         netdev->vlan_rx_register = atlx_vlan_rx_register;
2081
2082         netdev->ethtool_ops = &atl1_ethtool_ops;
2083         adapter->bd_number = cards_found;
2084
2085         /* setup the private structure */
2086         err = atl1_sw_init(adapter);
2087         if (err)
2088                 goto err_common;
2089
2090         netdev->features = NETIF_F_HW_CSUM;
2091         netdev->features |= NETIF_F_SG;
2092         netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2093         netdev->features |= NETIF_F_TSO;
2094         netdev->features |= NETIF_F_LLTX;
2095
2096         /*
2097          * patch for some old revisions of the L1 chip;
2098          * the final version of the L1 may not need
2099          * these patches
2100          */
2101         /* atl1_pcie_patch(adapter); */
2102
2103         /* really reset GPHY core */
2104         iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2105
2106         /*
2107          * reset the controller to
2108          * put the device in a known good starting state
2109          */
2110         if (atl1_reset_hw(&adapter->hw)) {
2111                 err = -EIO;
2112                 goto err_common;
2113         }
2114
2115         /* copy the MAC address out of the EEPROM */
2116         atl1_read_mac_addr(&adapter->hw);
2117         memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2118
2119         if (!is_valid_ether_addr(netdev->dev_addr)) {
2120                 err = -EIO;
2121                 goto err_common;
2122         }
2123
2124         atl1_check_options(adapter);
2125
2126         /* pre-init the MAC, and setup link */
2127         err = atl1_init_hw(&adapter->hw);
2128         if (err) {
2129                 err = -EIO;
2130                 goto err_common;
2131         }
2132
2133         atl1_pcie_patch(adapter);
2134         /* assume we have no link for now */
2135         netif_carrier_off(netdev);
2136         netif_stop_queue(netdev);
2137
2138         init_timer(&adapter->watchdog_timer);
2139         adapter->watchdog_timer.function = &atl1_watchdog;
2140         adapter->watchdog_timer.data = (unsigned long)adapter;
2141
2142         init_timer(&adapter->phy_config_timer);
2143         adapter->phy_config_timer.function = &atl1_phy_config;
2144         adapter->phy_config_timer.data = (unsigned long)adapter;
2145         adapter->phy_timer_pending = false;
2146
2147         INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
2148
2149         INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
2150
2151         INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
2152
2153         err = register_netdev(netdev);
2154         if (err)
2155                 goto err_common;
2156
2157         cards_found++;
2158         atl1_via_workaround(adapter);
2159         return 0;
2160
2161 err_common:
2162         pci_iounmap(pdev, adapter->hw.hw_addr);
2163 err_pci_iomap:
2164         free_netdev(netdev);
2165 err_alloc_etherdev:
2166         pci_release_regions(pdev);
2167 err_dma:
2168 err_request_regions:
2169         pci_disable_device(pdev);
2170         return err;
2171 }
2172
2173 /*
2174  * atl1_remove - Device Removal Routine
2175  * @pdev: PCI device information struct
2176  *
2177  * atl1_remove is called by the PCI subsystem to alert the driver
2178  * that it should release a PCI device.  This could be caused by a
2179  * Hot-Plug event, or because the driver is going to be removed from
2180  * memory.
2181  */
2182 static void __devexit atl1_remove(struct pci_dev *pdev)
2183 {
2184         struct net_device *netdev = pci_get_drvdata(pdev);
2185         struct atl1_adapter *adapter;
2186         /* Device not available. Return. */
2187         if (!netdev)
2188                 return;
2189
2190         adapter = netdev_priv(netdev);
2191
2192         /*
2193          * Some atl1 boards lack persistent storage for their MAC, and get it
2194          * from the BIOS during POST.  If we've been messing with the MAC
2195          * address, we need to save the permanent one.
2196          */
2197         if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
2198                 memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
2199                         ETH_ALEN);
2200                 atl1_set_mac_addr(&adapter->hw);
2201         }
2202
2203         iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2204         unregister_netdev(netdev);
2205         pci_iounmap(pdev, adapter->hw.hw_addr);
2206         pci_release_regions(pdev);
2207         free_netdev(netdev);
2208         pci_disable_device(pdev);
2209 }
2210
2211 static struct pci_driver atl1_driver = {
2212         .name = ATLX_DRIVER_NAME,
2213         .id_table = atl1_pci_tbl,
2214         .probe = atl1_probe,
2215         .remove = __devexit_p(atl1_remove),
2216         .suspend = atl1_suspend,
2217         .resume = atl1_resume
2218 };
2219
2220 /*
2221  * atl1_exit_module - Driver Exit Cleanup Routine
2222  *
2223  * atl1_exit_module is called just before the driver is removed
2224  * from memory.
2225  */
2226 static void __exit atl1_exit_module(void)
2227 {
2228         pci_unregister_driver(&atl1_driver);
2229 }
2230
2231 /*
2232  * atl1_init_module - Driver Registration Routine
2233  *
2234  * atl1_init_module is the first routine called when the driver is
2235  * loaded. All it does is register with the PCI subsystem.
2236  */
2237 static int __init atl1_init_module(void)
2238 {
2239         return pci_register_driver(&atl1_driver);
2240 }
2241
2242 module_init(atl1_init_module);
2243 module_exit(atl1_exit_module);
2244
2245 struct atl1_stats {
2246         char stat_string[ETH_GSTRING_LEN];
2247         int sizeof_stat;
2248         int stat_offset;
2249 };
2250
2251 #define ATL1_STAT(m) \
2252         sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
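/*
 * ATL1_STAT(m) supplies the two trailing initializers of struct atl1_stats:
 * the width of member m (so the reader knows whether to fetch a u32 or a
 * u64) and its byte offset within struct atl1_adapter.
 */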
2253
2254 static struct atl1_stats atl1_gstrings_stats[] = {
2255         {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
2256         {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
2257         {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
2258         {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
2259         {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
2260         {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
2261         {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
2262         {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
2263         {"multicast", ATL1_STAT(soft_stats.multicast)},
2264         {"collisions", ATL1_STAT(soft_stats.collisions)},
2265         {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
2266         {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
2267         {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
2268         {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
2269         {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
2270         {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
2271         {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
2272         {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
2273         {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
2274         {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
2275         {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
2276         {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
2277         {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
2278         {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
2279         {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
2280         {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
2281         {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
2282         {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
2283         {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
2284         {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
2285         {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
2286 };
2287
2288 static void atl1_get_ethtool_stats(struct net_device *netdev,
2289         struct ethtool_stats *stats, u64 *data)
2290 {
2291         struct atl1_adapter *adapter = netdev_priv(netdev);
2292         int i;
2293         char *p;
2294
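             /*
              * Each table entry records the offset and width of a counter
              * inside struct atl1_adapter, so it can be copied into the u64
              * array ethtool expects without per-stat accessors.
              */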
2295         for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2296                 p = (char *)adapter + atl1_gstrings_stats[i].stat_offset;
2297                 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
2298                         sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2299         }
2301 }
2302
2303 static int atl1_get_sset_count(struct net_device *netdev, int sset)
2304 {
2305         switch (sset) {
2306         case ETH_SS_STATS:
2307                 return ARRAY_SIZE(atl1_gstrings_stats);
2308         default:
2309                 return -EOPNOTSUPP;
2310         }
2311 }
2312
2313 static int atl1_get_settings(struct net_device *netdev,
2314         struct ethtool_cmd *ecmd)
2315 {
2316         struct atl1_adapter *adapter = netdev_priv(netdev);
2317         struct atl1_hw *hw = &adapter->hw;
2318
2319         ecmd->supported = (SUPPORTED_10baseT_Half |
2320                            SUPPORTED_10baseT_Full |
2321                            SUPPORTED_100baseT_Half |
2322                            SUPPORTED_100baseT_Full |
2323                            SUPPORTED_1000baseT_Full |
2324                            SUPPORTED_Autoneg | SUPPORTED_TP);
2325         ecmd->advertising = ADVERTISED_TP;
2326         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2327             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2328                 ecmd->advertising |= ADVERTISED_Autoneg;
2329                 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
2331                         ecmd->advertising |=
2332                             (ADVERTISED_10baseT_Half |
2333                              ADVERTISED_10baseT_Full |
2334                              ADVERTISED_100baseT_Half |
2335                              ADVERTISED_100baseT_Full |
2336                              ADVERTISED_1000baseT_Full);
2337                 } else
2338                         ecmd->advertising |= (ADVERTISED_1000baseT_Full);
2339         }
2340         ecmd->port = PORT_TP;
2341         ecmd->phy_address = 0;
2342         ecmd->transceiver = XCVR_INTERNAL;
2343
2344         if (netif_carrier_ok(adapter->netdev)) {
2345                 u16 link_speed, link_duplex;
2346                 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
2347                 ecmd->speed = link_speed;
2348                 if (link_duplex == FULL_DUPLEX)
2349                         ecmd->duplex = DUPLEX_FULL;
2350                 else
2351                         ecmd->duplex = DUPLEX_HALF;
2352         } else {
2353                 ecmd->speed = -1;
2354                 ecmd->duplex = -1;
2355         }
2356         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2357             hw->media_type == MEDIA_TYPE_1000M_FULL)
2358                 ecmd->autoneg = AUTONEG_ENABLE;
2359         else
2360                 ecmd->autoneg = AUTONEG_DISABLE;
2361
2362         return 0;
2363 }
2364
2365 static int atl1_set_settings(struct net_device *netdev,
2366         struct ethtool_cmd *ecmd)
2367 {
2368         struct atl1_adapter *adapter = netdev_priv(netdev);
2369         struct atl1_hw *hw = &adapter->hw;
2370         u16 phy_data;
2371         int ret_val = 0;
2372         u16 old_media_type = hw->media_type;
2373
2374         if (netif_running(adapter->netdev)) {
2375                 dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
2376                 atl1_down(adapter);
2377         }
2378
2379         if (ecmd->autoneg == AUTONEG_ENABLE)
2380                 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
2381         else {
2382                 if (ecmd->speed == SPEED_1000) {
2383                         if (ecmd->duplex != DUPLEX_FULL) {
2384                                 dev_warn(&adapter->pdev->dev,
2385                                         "can't force to 1000M half duplex\n");
2386                                 ret_val = -EINVAL;
2387                                 goto exit_sset;
2388                         }
2389                         hw->media_type = MEDIA_TYPE_1000M_FULL;
2390                 } else if (ecmd->speed == SPEED_100) {
2391                         if (ecmd->duplex == DUPLEX_FULL)
2392                                 hw->media_type = MEDIA_TYPE_100M_FULL;
2393                         else
2394                                 hw->media_type = MEDIA_TYPE_100M_HALF;
2395                 } else {
2396                         if (ecmd->duplex == DUPLEX_FULL)
2397                                 hw->media_type = MEDIA_TYPE_10M_FULL;
2398                         else
2399                                 hw->media_type = MEDIA_TYPE_10M_HALF;
2400                 }
2401         }
2402         switch (hw->media_type) {
2403         case MEDIA_TYPE_AUTO_SENSOR:
2404                 ecmd->advertising =
2405                     ADVERTISED_10baseT_Half |
2406                     ADVERTISED_10baseT_Full |
2407                     ADVERTISED_100baseT_Half |
2408                     ADVERTISED_100baseT_Full |
2409                     ADVERTISED_1000baseT_Full |
2410                     ADVERTISED_Autoneg | ADVERTISED_TP;
2411                 break;
2412         case MEDIA_TYPE_1000M_FULL:
2413                 ecmd->advertising =
2414                     ADVERTISED_1000baseT_Full |
2415                     ADVERTISED_Autoneg | ADVERTISED_TP;
2416                 break;
2417         default:
2418                 ecmd->advertising = 0;
2419                 break;
2420         }
2421         if (atl1_phy_setup_autoneg_adv(hw)) {
2422                 ret_val = -EINVAL;
2423                 dev_warn(&adapter->pdev->dev,
2424                         "invalid ethtool speed/duplex setting\n");
2425                 goto exit_sset;
2426         }
2427         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2428             hw->media_type == MEDIA_TYPE_1000M_FULL)
2429                 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2430         else {
2431                 switch (hw->media_type) {
2432                 case MEDIA_TYPE_100M_FULL:
2433                         phy_data =
2434                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
2435                             MII_CR_RESET;
2436                         break;
2437                 case MEDIA_TYPE_100M_HALF:
2438                         phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2439                         break;
2440                 case MEDIA_TYPE_10M_FULL:
2441                         phy_data =
2442                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
2443                         break;
2444                 default:
2445                         /* MEDIA_TYPE_10M_HALF: */
2446                         phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2447                         break;
2448                 }
2449         }
2450         atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2451 exit_sset:
2452         if (ret_val)
2453                 hw->media_type = old_media_type;
2454
2455         if (netif_running(adapter->netdev)) {
2456                 dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
2457                 atl1_up(adapter);
2458         } else if (!ret_val) {
2459                 dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
2460                 atl1_reset(adapter);
2461         }
2462         return ret_val;
2463 }
2464
2465 static void atl1_get_drvinfo(struct net_device *netdev,
2466         struct ethtool_drvinfo *drvinfo)
2467 {
2468         struct atl1_adapter *adapter = netdev_priv(netdev);
2469
2470         strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
2471         strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
2472                 sizeof(drvinfo->version));
2473         strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
2474         strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
2475                 sizeof(drvinfo->bus_info));
2476         drvinfo->eedump_len = ATL1_EEDUMP_LEN;
2477 }
2478
2479 static void atl1_get_wol(struct net_device *netdev,
2480         struct ethtool_wolinfo *wol)
2481 {
2482         struct atl1_adapter *adapter = netdev_priv(netdev);
2483
2484         wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
2485         wol->wolopts = 0;
2486         if (adapter->wol & ATLX_WUFC_EX)
2487                 wol->wolopts |= WAKE_UCAST;
2488         if (adapter->wol & ATLX_WUFC_MC)
2489                 wol->wolopts |= WAKE_MCAST;
2490         if (adapter->wol & ATLX_WUFC_BC)
2491                 wol->wolopts |= WAKE_BCAST;
2492         if (adapter->wol & ATLX_WUFC_MAG)
2493                 wol->wolopts |= WAKE_MAGIC;
2495 }
2496
2497 static int atl1_set_wol(struct net_device *netdev,
2498         struct ethtool_wolinfo *wol)
2499 {
2500         struct atl1_adapter *adapter = netdev_priv(netdev);
2501
2502         if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2503                 return -EOPNOTSUPP;
2504         adapter->wol = 0;
2505         if (wol->wolopts & WAKE_UCAST)
2506                 adapter->wol |= ATLX_WUFC_EX;
2507         if (wol->wolopts & WAKE_MCAST)
2508                 adapter->wol |= ATLX_WUFC_MC;
2509         if (wol->wolopts & WAKE_BCAST)
2510                 adapter->wol |= ATLX_WUFC_BC;
2511         if (wol->wolopts & WAKE_MAGIC)
2512                 adapter->wol |= ATLX_WUFC_MAG;
2513         return 0;
2514 }
2515
2516 static int atl1_get_regs_len(struct net_device *netdev)
2517 {
2518         return ATL1_REG_COUNT * sizeof(u32);
2519 }
2520
2521 static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2522         void *p)
2523 {
2524         struct atl1_adapter *adapter = netdev_priv(netdev);
2525         struct atl1_hw *hw = &adapter->hw;
2526         unsigned int i;
2527         u32 *regbuf = p;
2528
2529         for (i = 0; i < ATL1_REG_COUNT; i++) {
2530                 /*
2531                  * This switch statement avoids reserved regions
2532                  * of register space.
2533                  */
2534                 switch (i) {
2535                 case 6 ... 9:
2536                 case 14:
2537                 case 29 ... 31:
2538                 case 34 ... 63:
2539                 case 75 ... 127:
2540                 case 136 ... 1023:
2541                 case 1027 ... 1087:
2542                 case 1091 ... 1151:
2543                 case 1194 ... 1195:
2544                 case 1200 ... 1201:
2545                 case 1206 ... 1213:
2546                 case 1216 ... 1279:
2547                 case 1290 ... 1311:
2548                 case 1323 ... 1343:
2549                 case 1358 ... 1359:
2550                 case 1368 ... 1375:
2551                 case 1378 ... 1383:
2552                 case 1388 ... 1391:
2553                 case 1393 ... 1395:
2554                 case 1402 ... 1403:
2555                 case 1410 ... 1471:
2556                 case 1522 ... 1535:
2557                         /* reserved region; don't read it */
2558                         regbuf[i] = 0;
2559                         break;
2560                 default:
2561                         /* unreserved region */
2562                         regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
2563                 }
2564         }
2565 }
2566
2567 static void atl1_get_ringparam(struct net_device *netdev,
2568         struct ethtool_ringparam *ring)
2569 {
2570         struct atl1_adapter *adapter = netdev_priv(netdev);
2571         struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
2572         struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
2573
2574         ring->rx_max_pending = ATL1_MAX_RFD;
2575         ring->tx_max_pending = ATL1_MAX_TPD;
2576         ring->rx_mini_max_pending = 0;
2577         ring->rx_jumbo_max_pending = 0;
2578         ring->rx_pending = rxdr->count;
2579         ring->tx_pending = txdr->count;
2580         ring->rx_mini_pending = 0;
2581         ring->rx_jumbo_pending = 0;
2582 }
2583
2584 static int atl1_set_ringparam(struct net_device *netdev,
2585         struct ethtool_ringparam *ring)
2586 {
2587         struct atl1_adapter *adapter = netdev_priv(netdev);
2588         struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
2589         struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
2590         struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
2591
2592         struct atl1_tpd_ring tpd_old, tpd_new;
2593         struct atl1_rfd_ring rfd_old, rfd_new;
2594         struct atl1_rrd_ring rrd_old, rrd_new;
2595         struct atl1_ring_header rhdr_old, rhdr_new;
2596         int err;
2597
2598         tpd_old = adapter->tpd_ring;
2599         rfd_old = adapter->rfd_ring;
2600         rrd_old = adapter->rrd_ring;
2601         rhdr_old = adapter->ring_header;
2602
2603         if (netif_running(adapter->netdev))
2604                 atl1_down(adapter);
2605
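             /*
              * Clamp the requested counts to the supported range and round
              * them up to a multiple of four descriptors (a requirement
              * implied by this code, not confirmed against a datasheet);
              * the RRD ring is kept the same size as the RFD ring.
              */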
2606         rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
2607         rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
2608                         rfdr->count;
2609         rfdr->count = (rfdr->count + 3) & ~3;
2610         rrdr->count = rfdr->count;
2611
2612         tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
2613         tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
2614                         tpdr->count;
2615         tpdr->count = (tpdr->count + 3) & ~3;
2616
2617         if (netif_running(adapter->netdev)) {
2618                 /* try to get new resources before deleting old */
2619                 err = atl1_setup_ring_resources(adapter);
2620                 if (err)
2621                         goto err_setup_ring;
2622
2623                 /*
2624                  * save the new, restore the old in order to free it,
2625                  * then restore the new back again
2626                  */
2627
2628                 rfd_new = adapter->rfd_ring;
2629                 rrd_new = adapter->rrd_ring;
2630                 tpd_new = adapter->tpd_ring;
2631                 rhdr_new = adapter->ring_header;
2632                 adapter->rfd_ring = rfd_old;
2633                 adapter->rrd_ring = rrd_old;
2634                 adapter->tpd_ring = tpd_old;
2635                 adapter->ring_header = rhdr_old;
2636                 atl1_free_ring_resources(adapter);
2637                 adapter->rfd_ring = rfd_new;
2638                 adapter->rrd_ring = rrd_new;
2639                 adapter->tpd_ring = tpd_new;
2640                 adapter->ring_header = rhdr_new;
2641
2642                 err = atl1_up(adapter);
2643                 if (err)
2644                         return err;
2645         }
2646         return 0;
2647
2648 err_setup_ring:
2649         adapter->rfd_ring = rfd_old;
2650         adapter->rrd_ring = rrd_old;
2651         adapter->tpd_ring = tpd_old;
2652         adapter->ring_header = rhdr_old;
2653         atl1_up(adapter);
2654         return err;
2655 }
2656
2657 static void atl1_get_pauseparam(struct net_device *netdev,
2658         struct ethtool_pauseparam *epause)
2659 {
2660         struct atl1_adapter *adapter = netdev_priv(netdev);
2661         struct atl1_hw *hw = &adapter->hw;
2662
2663         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2664             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2665                 epause->autoneg = AUTONEG_ENABLE;
2666         } else {
2667                 epause->autoneg = AUTONEG_DISABLE;
2668         }
2669         epause->rx_pause = 1;
2670         epause->tx_pause = 1;
2671 }
2672
2673 static int atl1_set_pauseparam(struct net_device *netdev,
2674         struct ethtool_pauseparam *epause)
2675 {
2676         struct atl1_adapter *adapter = netdev_priv(netdev);
2677         struct atl1_hw *hw = &adapter->hw;
2678
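             /*
              * Note: this handler never programs the MAC; it only mirrors
              * the autoneg state back into epause, and pause is always
              * reported as enabled (see atl1_get_pauseparam above).
              */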
2679         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2680             hw->media_type == MEDIA_TYPE_1000M_FULL) {
2681                 epause->autoneg = AUTONEG_ENABLE;
2682         } else {
2683                 epause->autoneg = AUTONEG_DISABLE;
2684         }
2685
2686         epause->rx_pause = 1;
2687         epause->tx_pause = 1;
2688
2689         return 0;
2690 }
2691
2692 /* FIXME: is this right? -- CHS */
2693 static u32 atl1_get_rx_csum(struct net_device *netdev)
2694 {
2695         return 1;
2696 }
2697
2698 static void atl1_get_strings(struct net_device *netdev, u32 stringset,
2699         u8 *data)
2700 {
2701         u8 *p = data;
2702         int i;
2703
2704         switch (stringset) {
2705         case ETH_SS_STATS:
2706                 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
2707                         memcpy(p, atl1_gstrings_stats[i].stat_string,
2708                                 ETH_GSTRING_LEN);
2709                         p += ETH_GSTRING_LEN;
2710                 }
2711                 break;
2712         }
2713 }
2714
2715 static int atl1_nway_reset(struct net_device *netdev)
2716 {
2717         struct atl1_adapter *adapter = netdev_priv(netdev);
2718         struct atl1_hw *hw = &adapter->hw;
2719
2720         if (netif_running(netdev)) {
2721                 u16 phy_data;
2722                 atl1_down(adapter);
2723
2724                 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
2725                         hw->media_type == MEDIA_TYPE_1000M_FULL) {
2726                         phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
2727                 } else {
2728                         switch (hw->media_type) {
2729                         case MEDIA_TYPE_100M_FULL:
2730                                 phy_data = MII_CR_FULL_DUPLEX |
2731                                         MII_CR_SPEED_100 | MII_CR_RESET;
2732                                 break;
2733                         case MEDIA_TYPE_100M_HALF:
2734                                 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
2735                                 break;
2736                         case MEDIA_TYPE_10M_FULL:
2737                                 phy_data = MII_CR_FULL_DUPLEX |
2738                                         MII_CR_SPEED_10 | MII_CR_RESET;
2739                                 break;
2740                         default:
2741                                 /* MEDIA_TYPE_10M_HALF */
2742                                 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
2743                         }
2744                 }
2745                 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
2746                 atl1_up(adapter);
2747         }
2748         return 0;
2749 }
2750
2751 const struct ethtool_ops atl1_ethtool_ops = {
2752         .get_settings           = atl1_get_settings,
2753         .set_settings           = atl1_set_settings,
2754         .get_drvinfo            = atl1_get_drvinfo,
2755         .get_wol                = atl1_get_wol,
2756         .set_wol                = atl1_set_wol,
2757         .get_regs_len           = atl1_get_regs_len,
2758         .get_regs               = atl1_get_regs,
2759         .get_ringparam          = atl1_get_ringparam,
2760         .set_ringparam          = atl1_set_ringparam,
2761         .get_pauseparam         = atl1_get_pauseparam,
2762         .set_pauseparam         = atl1_set_pauseparam,
2763         .get_rx_csum            = atl1_get_rx_csum,
2764         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
2765         .get_link               = ethtool_op_get_link,
2766         .set_sg                 = ethtool_op_set_sg,
2767         .get_strings            = atl1_get_strings,
2768         .nway_reset             = atl1_nway_reset,
2769         .get_ethtool_stats      = atl1_get_ethtool_stats,
2770         .get_sset_count         = atl1_get_sset_count,
2771         .set_tso                = ethtool_op_set_tso,
2772 };
2773
2774 /*
2775  * Reset the transmit and receive units; mask and clear all interrupts.
2776  * hw - Struct containing variables accessed by shared code
2777  * return: 0 on success, or the idle status register contents on error
2778  */
2779 s32 atl1_reset_hw(struct atl1_hw *hw)
2780 {
2781         struct pci_dev *pdev = hw->back->pdev;
2782         u32 icr;
2783         int i;
2784
2785         /*
2786          * Clear the interrupt mask to stop the board from generating
2787          * interrupts, and clear any pending interrupt events
2788          */
2789         /*
2790          * iowrite32(0, hw->hw_addr + REG_IMR);
2791          * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
2792          */
2793
2794         /*
2795          * Issue Soft Reset to the MAC.  This will reset the chip's
2796  * transmit, receive and DMA units.  It will not affect
2797          * the current PCI configuration.  The global reset bit is self-
2798          * clearing, and should clear within a microsecond.
2799          */
2800         iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
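        /* read back to flush the posted write before continuing */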
2801         ioread32(hw->hw_addr + REG_MASTER_CTRL);
2802
2803         iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
2804         ioread16(hw->hw_addr + REG_PHY_ENABLE);
2805
2806         /* delay about 1ms */
2807         msleep(1);
2808
2809         /* wait at least 10 ms for all modules to go idle */
2810         for (i = 0; i < 10; i++) {
2811                 icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
2812                 if (!icr)
2813                         break;
2814                 /* delay 1 ms */
2815                 msleep(1);
2816                 /* FIXME: still the right way to do this? */
2817                 cpu_relax();
2818         }
2819
2820         if (icr) {
2821                 dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
2822                 return icr;
2823         }
2824
2825         return 0;
2826 }
2827
2828 /* EEPROM support functions
2829  *
2830  * atl1_check_eeprom_exist
2831  * returns 0 if an EEPROM is present
2832  */
2833 static int atl1_check_eeprom_exist(struct atl1_hw *hw)
2834 {
2835         u32 value;
2836         value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2837         if (value & SPI_FLASH_CTRL_EN_VPD) {
2838                 value &= ~SPI_FLASH_CTRL_EN_VPD;
2839                 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2840         }
2841
2842         value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
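        /*
         * Per the vendor reference driver, 0x6C in the high byte of the
         * PCIe capability list register indicates an EEPROM is attached.
         */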
2843         return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2844 }
2845
2846 static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
2847 {
2848         int i;
2849         u32 control;
2850
2851         if (offset & 3)
2852                 /* address is not dword-aligned */
2853                 return false;
2854
2855         iowrite32(0, hw->hw_addr + REG_VPD_DATA);
2856         control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2857         iowrite32(control, hw->hw_addr + REG_VPD_CAP);
2858         ioread32(hw->hw_addr + REG_VPD_CAP);
2859
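        /* poll until the hardware sets the VPD flag to signal data ready */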
2860         for (i = 0; i < 10; i++) {
2861                 msleep(2);
2862                 control = ioread32(hw->hw_addr + REG_VPD_CAP);
2863                 if (control & VPD_CAP_VPD_FLAG)
2864                         break;
2865         }
2866         if (control & VPD_CAP_VPD_FLAG) {
2867                 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
2868                 return true;
2869         }
2870         /* timeout */
2871         return false;
2872 }
2873
2874 /*
2875  * Reads the value from a PHY register
2876  * hw - Struct containing variables accessed by shared code
2877  * reg_addr - address of the PHY register to read
2878  */
2879 s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
2880 {
2881         u32 val;
2882         int i;
2883
2884         val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
2885                 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
2886                 MDIO_CLK_SEL_SHIFT;
2887         iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
2888         ioread32(hw->hw_addr + REG_MDIO_CTRL);
2889
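        /* wait for the MDIO transaction to finish (START and BUSY clear) */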
2890         for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2891                 udelay(2);
2892                 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
2893                 if (!(val & (MDIO_START | MDIO_BUSY)))
2894                         break;
2895         }
2896         if (!(val & (MDIO_START | MDIO_BUSY))) {
2897                 *phy_data = (u16) val;
2898                 return 0;
2899         }
2900         return ATLX_ERR_PHY;
2901 }
2902
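/*
 * SPI flash controller timing values (chip-select setup/hold and clock
 * high/low counts), as used by the vendor reference driver.
 */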
2903 #define CUSTOM_SPI_CS_SETUP     2
2904 #define CUSTOM_SPI_CLK_HI       2
2905 #define CUSTOM_SPI_CLK_LO       2
2906 #define CUSTOM_SPI_CS_HOLD      2
2907 #define CUSTOM_SPI_CS_HI        3
2908
2909 static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
2910 {
2911         int i;
2912         u32 value;
2913
2914         iowrite32(0, hw->hw_addr + REG_SPI_DATA);
2915         iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
2916
2917         value = SPI_FLASH_CTRL_WAIT_READY |
2918             (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
2919             SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
2920                                              SPI_FLASH_CTRL_CLK_HI_MASK) <<
2921             SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
2922                                            SPI_FLASH_CTRL_CLK_LO_MASK) <<
2923             SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
2924                                            SPI_FLASH_CTRL_CS_HOLD_MASK) <<
2925             SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
2926                                             SPI_FLASH_CTRL_CS_HI_MASK) <<
2927             SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
2928             SPI_FLASH_CTRL_INS_SHIFT;
2929
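        /* program the control word, then set START to launch the read */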
2930         iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2931
2932         value |= SPI_FLASH_CTRL_START;
2933         iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
2934         ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2935
2936         for (i = 0; i < 10; i++) {
2937                 msleep(1);
2938                 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
2939                 if (!(value & SPI_FLASH_CTRL_START))
2940                         break;
2941         }
2942
2943         if (value & SPI_FLASH_CTRL_START)
2944                 return false;
2945
2946         *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
2947
2948         return true;
2949 }
2950
2951 /*
2952  * atl1_get_permanent_address
2953  * returns 0 if a valid MAC address was obtained
2954  */
2955 static int atl1_get_permanent_address(struct atl1_hw *hw)
2956 {
2957         u32 addr[2];
2958         u32 i, control;
2959         u16 reg;
2960         u8 eth_addr[ETH_ALEN];
2961         bool key_valid;
2962
2963         if (is_valid_ether_addr(hw->perm_mac_addr))
2964                 return 0;
2965
2966         /* init */
2967         addr[0] = addr[1] = 0;
2968
2969         if (!atl1_check_eeprom_exist(hw)) {
2970                 reg = 0;
2971                 key_valid = false;
2972                 /* Read out all EEPROM content */
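                /*
                 * EEPROM layout: a dword whose low byte is 0x5A is a key
                 * holding a register offset in its upper 16 bits; the
                 * next dword holds the value for that register.
                 */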
2973                 i = 0;
2974                 while (1) {
2975                         if (atl1_read_eeprom(hw, i + 0x100, &control)) {
2976                                 if (key_valid) {
2977                                         if (reg == REG_MAC_STA_ADDR)
2978                                                 addr[0] = control;
2979                                         else if (reg == (REG_MAC_STA_ADDR + 4))
2980                                                 addr[1] = control;
2981                                         key_valid = false;
2982                                 } else if ((control & 0xff) == 0x5A) {
2983                                         key_valid = true;
2984                                         reg = (u16) (control >> 16);
2985                                 } else
2986                                         break;
2987                         } else
2988                                 /* read error */
2989                                 break;
2990                         i += 4;
2991                 }
2992
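                /* the dwords are stored byte-swapped; restore MAC order */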
2993                 *(u32 *) &eth_addr[2] = swab32(addr[0]);
2994                 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
2995                 if (is_valid_ether_addr(eth_addr)) {
2996                         memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
2997                         return 0;
2998                 }
2999                 return 1;
3000         }
3001
3002         /* no EEPROM found; see if the MAC address lives in SPI flash */
3003         addr[0] = addr[1] = 0;
3004         reg = 0;
3005         key_valid = false;
3006         i = 0;
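        /* same key/value layout as the EEPROM, at flash offset 0x1f000 */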
3007         while (1) {
3008                 if (atl1_spi_read(hw, i + 0x1f000, &control)) {
3009                         if (key_valid) {
3010                                 if (reg == REG_MAC_STA_ADDR)
3011                                         addr[0] = control;
3012                                 else if (reg == (REG_MAC_STA_ADDR + 4))
3013                                         addr[1] = control;
3014                                 key_valid = false;
3015                         } else if ((control & 0xff) == 0x5A) {
3016                                 key_valid = true;
3017                                 reg = (u16) (control >> 16);
3018                         } else
3019                                 /* data end */
3020                                 break;
3021                 } else
3022                         /* read error */
3023                         break;
3024                 i += 4;
3025         }
3026
3027         *(u32 *) &eth_addr[2] = swab32(addr[0]);
3028         *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3029         if (is_valid_ether_addr(eth_addr)) {
3030                 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3031                 return 0;
3032         }
3033
3034         /*
3035          * On some motherboards, the MAC address is written by the
3036          * BIOS directly to the MAC register during POST, and is
3037          * not stored in eeprom.  If all else thus far has failed
3038          * to fetch the permanent MAC address, try reading it directly.
3039          */
3040         addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
3041         addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
3042         *(u32 *) &eth_addr[2] = swab32(addr[0]);
3043         *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
3044         if (is_valid_ether_addr(eth_addr)) {
3045                 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
3046                 return 0;
3047         }
3048
3049         return 1;
3050 }
3051
3052 /*
3053  * Reads the adapter's MAC address from the EEPROM
3054  * hw - Struct containing variables accessed by shared code
3055  */
3056 s32 atl1_read_mac_addr(struct atl1_hw *hw)
3057 {
3058         u16 i;
3059
3060         if (atl1_get_permanent_address(hw))
3061                 random_ether_addr(hw->perm_mac_addr);
3062
3063         for (i = 0; i < ETH_ALEN; i++)
3064                 hw->mac_addr[i] = hw->perm_mac_addr[i];
3065         return 0;
3066 }
3067
3068 /*
3069  * Hashes an address to determine its location in the multicast table
3070  * hw - Struct containing variables accessed by shared code
3071  * mc_addr - the multicast address to hash
3072  *
3073  * atl1_hash_mc_addr
3074  *  purpose
3075  *      compute the hash value for a multicast address
3076  *      hash calculation:
3077  *          1. calculate a 32-bit CRC of the multicast address
3078  *          2. bit-reverse the CRC, so the MSB becomes the LSB
3079  */
3080 u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
3081 {
3082         u32 crc32, value = 0;
3083         int i;
3084
3085         crc32 = ether_crc_le(6, mc_addr);
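        /* bit-reverse: bit i of the CRC becomes bit (31 - i) of the hash */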
3086         for (i = 0; i < 32; i++)
3087                 value |= (((crc32 >> i) & 1) << (31 - i));
3088
3089         return value;
3090 }
3091
3092 /*
3093  * Sets the bit in the multicast table corresponding to the hash value.
3094  * hw - Struct containing variables accessed by shared code
3095  * hash_value - Multicast address hash value
3096  */
3097 void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
3098 {
3099         u32 hash_bit, hash_reg;
3100         u32 mta;
3101
3102         /*
3103          * The hash table is a register array of 2 32-bit registers,
3104          * treated as a single 64-bit bit array.  We want to set
3105          * bit BitArray[hash_value], so we figure out which register
3106          * the bit is in, read it, OR in the new bit, then write
3107          * back the new value.  The register is selected by the most
3108          * significant bit of the hash value, and the bit within that
3109          * register by the next five bits.
3110          */
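        /* e.g. hash_value 0x80000000 selects bit 0 of register 1 */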
3111         hash_reg = (hash_value >> 31) & 0x1;
3112         hash_bit = (hash_value >> 26) & 0x1F;
3113         mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
3114         mta |= (1 << hash_bit);
3115         iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
3116 }
3117
3118 /*
3119  * Writes a value to a PHY register
3120  * hw - Struct containing variables accessed by shared code
3121  * reg_addr - address of the PHY register to write
3122  * data - data to write to the PHY
3123  */
3124 s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
3125 {
3126         int i;
3127         u32 val;
3128
3129         val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
3130             (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
3131             MDIO_SUP_PREAMBLE |
3132             MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
3133         iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
3134         ioread32(hw->hw_addr + REG_MDIO_CTRL);
3135
3136         for (i = 0; i < MDIO_WAIT_TIMES; i++) {
3137                 udelay(2);
3138                 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3139                 if (!(val & (MDIO_START | MDIO_BUSY)))
3140                         break;
3141         }
3142
3143         if (!(val & (MDIO_START | MDIO_BUSY)))
3144                 return 0;
3145
3146         return ATLX_ERR_PHY;
3147 }
3148
3149 /*
3150  * Force the L001's PHY out of its power-saving state (hardware bug)
3151  * hw - Struct containing variables accessed by shared code
3152  * At power-on the PHY always comes up in power-saving state,
3153  * in which a gigabit link cannot be established.
3154  */
3155 static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
3156 {
3157         s32 ret;
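        /* magic write sequence taken from the vendor reference driver */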
3158         ret = atl1_write_phy_reg(hw, 29, 0x0029);
3159         if (ret)
3160                 return ret;
3161         return atl1_write_phy_reg(hw, 30, 0);
3162 }
3163
3164 /*
3165  * TODO: do something or get rid of this
3166  */
3167 s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
3168 {
3169 /*    s32 ret_val;
3170  *    u16 phy_data;
3171  */
3172
3173 /*
3174     ret_val = atl1_write_phy_reg(hw, ...);
3175     ret_val = atl1_write_phy_reg(hw, ...);
3176     ....
3177 */
3178         return 0;
3179 }
3180
3181 /*
3182  * Resets the PHY and makes all configuration take effect
3183  * hw - Struct containing variables accessed by shared code
3184  *
3185  * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
3186  */
3187 static s32 atl1_phy_reset(struct atl1_hw *hw)
3188 {
3189         struct pci_dev *pdev = hw->back->pdev;
3190         s32 ret_val;
3191         u16 phy_data;
3192
3193         if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3194             hw->media_type == MEDIA_TYPE_1000M_FULL)
3195                 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3196         else {
3197                 switch (hw->media_type) {
3198                 case MEDIA_TYPE_100M_FULL:
3199                         phy_data =
3200                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
3201                             MII_CR_RESET;
3202                         break;
3203                 case MEDIA_TYPE_100M_HALF:
3204                         phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3205                         break;
3206                 case MEDIA_TYPE_10M_FULL:
3207                         phy_data =
3208                             MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
3209                         break;
3210                 default:
3211                         /* MEDIA_TYPE_10M_HALF: */
3212                         phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3213                         break;
3214                 }
3215         }
3216
3217         ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3218         if (ret_val) {
3219                 u32 val;
3220                 int i;
3221                 /* pcie serdes link may be down! */
3222                 dev_dbg(&pdev->dev, "pcie phy link down\n");
3223
3224                 for (i = 0; i < 25; i++) {
3225                         msleep(1);
3226                         val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
3227                         if (!(val & (MDIO_START | MDIO_BUSY)))
3228                                 break;
3229                 }
3230
3231                 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
3232                         dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
3233                         return ret_val;
3234                 }
3235         }
3236         return 0;
3237 }
3238
3239 /*
3240  * Configures PHY autoneg and flow control advertisement settings
3241  * hw - Struct containing variables accessed by shared code
3242  */
3243 s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
3244 {
3245         s32 ret_val;
3246         s16 mii_autoneg_adv_reg;
3247         s16 mii_1000t_ctrl_reg;
3248
3249         /* Read the MII Auto-Neg Advertisement Register (Address 4). */
3250         mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
3251
3252         /* Read the MII 1000Base-T Control Register (Address 9). */
3253         mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
3254
3255         /*
3256          * First we clear all the 10/100 Mb/s speed bits in the Auto-Neg
3257          * Advertisement Register (Address 4) and the 1000 Mb/s speed bits
3258          * in the 1000Base-T Control Register (Address 9).
3259          */
3260         mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
3261         mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
3262
3263         /*
3264          * Need to parse media_type  and set up
3265          * Need to parse media_type and set up
3266          */
3267         switch (hw->media_type) {
3268         case MEDIA_TYPE_AUTO_SENSOR:
3269                 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
3270                                         MII_AR_10T_FD_CAPS |
3271                                         MII_AR_100TX_HD_CAPS |
3272                                         MII_AR_100TX_FD_CAPS);
3273                 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3274                 break;
3275
3276         case MEDIA_TYPE_1000M_FULL:
3277                 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
3278                 break;
3279
3280         case MEDIA_TYPE_100M_FULL:
3281                 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
3282                 break;
3283
3284         case MEDIA_TYPE_100M_HALF:
3285                 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
3286                 break;
3287
3288         case MEDIA_TYPE_10M_FULL:
3289                 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
3290                 break;
3291
3292         default:
3293                 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
3294                 break;
3295         }
3296
3297         /* flow control forced on: advertise symmetric and asymmetric pause */
3298         mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
3299
3300         hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
3301         hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
3302
3303         ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
3304         if (ret_val)
3305                 return ret_val;
3306
3307         ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
3308         if (ret_val)
3309                 return ret_val;
3310
3311         return 0;
3312 }
3313
3314 /*
3315  * Configures link settings.
3316  * hw - Struct containing variables accessed by shared code
3317  * Assumes the hardware has previously been reset and the
3318  * transmitter and receiver are not enabled.
3319  */
3320 static s32 atl1_setup_link(struct atl1_hw *hw)
3321 {
3322         struct pci_dev *pdev = hw->back->pdev;
3323         s32 ret_val;
3324
3325         /*
3326          * Options:
3327          *  The PHY advertises the values parsed from
3328          *  autoneg_advertised and fc.  Regardless of the autoneg
3329          *  setting, we do not wait for the link result.
3330          */
3331         ret_val = atl1_phy_setup_autoneg_adv(hw);
3332         if (ret_val) {
3333                 dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
3334                 return ret_val;
3335         }
3336         /* soft-reset the PHY, enabling autonegotiation if needed */
3337         ret_val = atl1_phy_reset(hw);
3338         if (ret_val) {
3339                 dev_dbg(&pdev->dev, "error resetting phy\n");
3340                 return ret_val;
3341         }
3342         hw->phy_configured = true;
3343         return ret_val;
3344 }
3345
3346 static void atl1_init_flash_opcode(struct atl1_hw *hw)
3347 {
3348         if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
3349                 /* Atmel */
3350                 hw->flash_vendor = 0;
3351
3352         /* Init OP table */
3353         iowrite8(flash_table[hw->flash_vendor].cmd_program,
3354                 hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
3355         iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
3356                 hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
3357         iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
3358                 hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
3359         iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
3360                 hw->hw_addr + REG_SPI_FLASH_OP_RDID);
3361         iowrite8(flash_table[hw->flash_vendor].cmd_wren,
3362                 hw->hw_addr + REG_SPI_FLASH_OP_WREN);
3363         iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
3364                 hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
3365         iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
3366                 hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
3367         iowrite8(flash_table[hw->flash_vendor].cmd_read,
3368                 hw->hw_addr + REG_SPI_FLASH_OP_READ);
3369 }
3370
3371 /*
3372  * Performs basic configuration of the adapter.
3373  * hw - Struct containing variables accessed by shared code
3374  * Assumes that the controller has previously been reset and is in a
3375  * post-reset uninitialized state.  Initializes the multicast table
3376  * and calls routines to set up the link.
3377  * Leaves the transmit and receive units disabled and uninitialized.
3378  */
3379 s32 atl1_init_hw(struct atl1_hw *hw)
3380 {
3381         u32 ret_val = 0;
3382
3383         /* Zero out the Multicast HASH table */
3384         iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
3385         /* clear the old settings from the multicast hash table */
3386         iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
3387
3388         atl1_init_flash_opcode(hw);
3389
3390         if (!hw->phy_configured) {
3391                 /* enable the GPHY LinkChange interrupt */
3392                 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
3393                 if (ret_val)
3394                         return ret_val;
3395                 /* make PHY out of power-saving state */
3396                 ret_val = atl1_phy_leave_power_saving(hw);
3397                 if (ret_val)
3398                         return ret_val;
3399                 /* Call a subroutine to configure the link */
3400                 ret_val = atl1_setup_link(hw);
3401         }
3402         return ret_val;
3403 }
3404
3405 /*
3406  * Detects the current speed and duplex settings of the hardware.
3407  * hw - Struct containing variables accessed by shared code
3408  * speed - Speed of the connection
3409  * duplex - Duplex setting of the connection
3410  */
3411 s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
3412 {
3413         struct pci_dev *pdev = hw->back->pdev;
3414         s32 ret_val;
3415         u16 phy_data;
3416
3417         /* read the PHY-Specific Status Register (register 17) */
3418         ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
3419         if (ret_val)
3420                 return ret_val;
3421
3422         if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
3423                 return ATLX_ERR_PHY_RES;
3424
3425         switch (phy_data & MII_ATLX_PSSR_SPEED) {
3426         case MII_ATLX_PSSR_1000MBS:
3427                 *speed = SPEED_1000;
3428                 break;
3429         case MII_ATLX_PSSR_100MBS:
3430                 *speed = SPEED_100;
3431                 break;
3432         case MII_ATLX_PSSR_10MBS:
3433                 *speed = SPEED_10;
3434                 break;
3435         default:
3436                 dev_dbg(&pdev->dev, "error getting speed\n");
3437                 return ATLX_ERR_PHY_SPEED;
3439         }
3440         if (phy_data & MII_ATLX_PSSR_DPLX)
3441                 *duplex = FULL_DUPLEX;
3442         else
3443                 *duplex = HALF_DUPLEX;
3444
3445         return 0;
3446 }
3447
3448 void atl1_set_mac_addr(struct atl1_hw *hw)
3449 {
3450         u32 value;
3451         /*
3452          * e.g. for MAC address 00-0B-6A-F6-00-DC:
3453          * reg 0 holds 6AF600DC, reg 1 holds 000B
3454          * low dword
3455          */
3456         value = (((u32) hw->mac_addr[2]) << 24) |
3457             (((u32) hw->mac_addr[3]) << 16) |
3458             (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
3459         iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
3460         /* high dword */
3461         value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
3462         iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
3463 }