1
2 /* Advanced  Micro Devices Inc. AMD8111E Linux Network Driver
3  * Copyright (C) 2004 Advanced Micro Devices
4  *
5  *
6  * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8  * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9  * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10  * Copyright 1993 United States Government as represented by the
11  *      Director, National Security Agency.[ pcnet32.c ]
12  * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
14  *
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  *
21  * This program is distributed in the hope that it will be useful,
22  * but WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24  * GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with this program; if not, write to the Free Software
28  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
29  * USA
30
31 Module Name:
32
33         amd8111e.c
34
35 Abstract:
36
37          AMD8111 based 10/100 Ethernet Controller Driver.
38
39 Environment:
40
41         Kernel Mode
42
43 Revision History:
44         3.0.0
45            Initial Revision.
46         3.0.1
47          1. Dynamic interrupt coalescing.
48          2. Removed prev_stats.
49          3. MII support.
50          4. Dynamic IPG support
51         3.0.2  05/29/2003
52          1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
53          2. Bug fix: Fixed VLAN support failure.
54          3. Bug fix: Fixed receive interrupt coalescing bug.
55          4. Dynamic IPG support is disabled by default.
56         3.0.3 06/05/2003
57          1. Bug fix: Fixed failure to close the interface if SMP is enabled.
58         3.0.4 12/09/2003
59          1. Added set_mac_address routine for bonding driver support.
60          2. Tested the driver for bonding support
61          3. Bug fix: Fixed mismatch in actual receive buffer length and length
62             indicated to the h/w.
63          4. Modified amd8111e_rx() routine to receive all the received packets
64             in the first interrupt.
65          5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
66         3.0.5 03/22/2004
67          1. Added NAPI support
68
69 */
70
71
72 #include <linux/module.h>
73 #include <linux/kernel.h>
74 #include <linux/types.h>
75 #include <linux/compiler.h>
76 #include <linux/slab.h>
77 #include <linux/delay.h>
78 #include <linux/init.h>
79 #include <linux/ioport.h>
80 #include <linux/pci.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/skbuff.h>
84 #include <linux/ethtool.h>
85 #include <linux/mii.h>
86 #include <linux/if_vlan.h>
87 #include <linux/ctype.h>
88 #include <linux/crc32.h>
89 #include <linux/dma-mapping.h>
90
91 #include <asm/system.h>
92 #include <asm/io.h>
93 #include <asm/byteorder.h>
94 #include <asm/uaccess.h>
95
96 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
97 #define AMD8111E_VLAN_TAG_USED 1
98 #else
99 #define AMD8111E_VLAN_TAG_USED 0
100 #endif
101
102 #include "amd8111e.h"
103 #define MODULE_NAME     "amd8111e"
104 #define MODULE_VERS     "3.0.7"
105 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
106 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
107 MODULE_LICENSE("GPL");
108 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
109 module_param_array(speed_duplex, int, NULL, 0);
110 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
111 module_param_array(coalesce, bool, NULL, 0);
112 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
113 module_param_array(dynamic_ipg, bool, NULL, 0);
114 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
115
116 static struct pci_device_id amd8111e_pci_tbl[] = {
117
118         { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
119          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
120         { 0, }
121
122 };
123 /*
124 This function will read the PHY registers.
125 */
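/*
 * PHY access sketch, as implemented below: wait for any previous command
 * (PHY_CMD_ACTIVE) to finish, write the command word with the PHY id in
 * bits 25:21 and the register number in bits 20:16, then poll until the
 * controller clears PHY_CMD_ACTIVE.  The result is returned in the low
 * 16 bits of PHY_ACCESS; PHY_RD_ERR flags a failed read.
 */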
126 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
127 {
128         void __iomem *mmio = lp->mmio;
129         unsigned int reg_val;
130         unsigned int repeat= REPEAT_CNT;
131
132         reg_val = readl(mmio + PHY_ACCESS);
133         while (reg_val & PHY_CMD_ACTIVE)
134                 reg_val = readl( mmio + PHY_ACCESS );
135
136         writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
137                            ((reg & 0x1f) << 16),  mmio +PHY_ACCESS);
138         do{
139                 reg_val = readl(mmio + PHY_ACCESS);
140                 udelay(30);  /* It takes 30 us to read/write data */
141         } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
142         if(reg_val & PHY_RD_ERR)
143                 goto err_phy_read;
144
145         *val = reg_val & 0xffff;
146         return 0;
147 err_phy_read:
148         *val = 0;
149         return -EINVAL;
150
151 }
152
153 /*
154 This function will write into PHY registers.
155 */
156 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
157 {
158         unsigned int repeat = REPEAT_CNT;
159         void __iomem *mmio = lp->mmio;
160         unsigned int reg_val;
161
162         reg_val = readl(mmio + PHY_ACCESS);
163         while (reg_val & PHY_CMD_ACTIVE)
164                 reg_val = readl( mmio + PHY_ACCESS );
165
166         writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
167                            ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
168
169         do{
170                 reg_val = readl(mmio + PHY_ACCESS);
171                 udelay(30);  /* It takes 30 us to read/write the data */
172         } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
173
174         if(reg_val & PHY_RD_ERR)
175                 goto err_phy_write;
176
177         return 0;
178
179 err_phy_write:
180         return -EINVAL;
181
182 }
183 /*
184 This is the mii register read function provided to the mii interface.
185 */
186 static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
187 {
188         struct amd8111e_priv* lp = netdev_priv(dev);
189         unsigned int reg_val;
190
191         amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
192         return reg_val;
193
194 }
195
196 /*
197 This is the mii register write function provided to the mii interface.
198 */
199 static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
200 {
201         struct amd8111e_priv* lp = netdev_priv(dev);
202
203         amd8111e_write_phy(lp, phy_id, reg_num, val);
204 }
205
206 /*
207 This function will set the PHY speed. During initialization it sets the original speed to 100 full.
208 */
209 static void amd8111e_set_ext_phy(struct net_device *dev)
210 {
211         struct amd8111e_priv *lp = netdev_priv(dev);
212         u32 bmcr,advert,tmp;
213
214         /* Determine mii register values to set the speed */
215         advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
216         tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
217         switch (lp->ext_phy_option){
218
219                 default:
220                 case SPEED_AUTONEG: /* advertise all values */
221                         tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
222                                 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
223                         break;
224                 case SPEED10_HALF:
225                         tmp |= ADVERTISE_10HALF;
226                         break;
227                 case SPEED10_FULL:
228                         tmp |= ADVERTISE_10FULL;
229                         break;
230                 case SPEED100_HALF:
231                         tmp |= ADVERTISE_100HALF;
232                         break;
233                 case SPEED100_FULL:
234                         tmp |= ADVERTISE_100FULL;
235                         break;
236         }
237
238         if(advert != tmp)
239                 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
240         /* Restart auto negotiation */
241         bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
242         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
243         amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
244
245 }
246
247 /*
248 This function will unmap skb->data space and will free
249 all transmit and receive skbuffs.
250 */
251 static int amd8111e_free_skbs(struct net_device *dev)
252 {
253         struct amd8111e_priv *lp = netdev_priv(dev);
254         struct sk_buff* rx_skbuff;
255         int i;
256
257         /* Freeing transmit skbs */
258         for(i = 0; i < NUM_TX_BUFFERS; i++){
259                 if(lp->tx_skbuff[i]){
260                         pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
261                         dev_kfree_skb (lp->tx_skbuff[i]);
262                         lp->tx_skbuff[i] = NULL;
263                         lp->tx_dma_addr[i] = 0;
264                 }
265         }
266         /* Freeing previously allocated receive buffers */
267         for (i = 0; i < NUM_RX_BUFFERS; i++){
268                 rx_skbuff = lp->rx_skbuff[i];
269                 if(rx_skbuff != NULL){
270                         pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
271                                   lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
272                         dev_kfree_skb(lp->rx_skbuff[i]);
273                         lp->rx_skbuff[i] = NULL;
274                         lp->rx_dma_addr[i] = 0;
275                 }
276         }
277
278         return 0;
279 }
280
281 /*
282 This will set the receive buffer length corresponding to the mtu size of the network interface.
283 */
284 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
285 {
286         struct amd8111e_priv* lp = netdev_priv(dev);
287         unsigned int mtu = dev->mtu;
288
289         if (mtu > ETH_DATA_LEN){
290                 /* MTU + ethernet header + FCS
291                 + optional VLAN tag + skb reserve space 2 */
292
293                 lp->rx_buff_len = mtu + ETH_HLEN + 10;
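                /* the extra 10 = 4 (FCS) + 4 (optional VLAN tag) + 2 (skb_reserve alignment) */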
294                 lp->options |= OPTION_JUMBO_ENABLE;
295         } else{
296                 lp->rx_buff_len = PKT_BUFF_SZ;
297                 lp->options &= ~OPTION_JUMBO_ENABLE;
298         }
299 }
300
301 /*
302 This function will free all the previously allocated buffers, determine the new receive buffer length and allocate new receive buffers. This function also allocates and initializes both the transmit and receive hardware descriptors.
303  */
304 static int amd8111e_init_ring(struct net_device *dev)
305 {
306         struct amd8111e_priv *lp = netdev_priv(dev);
307         int i;
308
309         lp->rx_idx = lp->tx_idx = 0;
310         lp->tx_complete_idx = 0;
311         lp->tx_ring_idx = 0;
312
313
314         if(lp->opened)
315                 /* Free previously allocated transmit and receive skbs */
316                 amd8111e_free_skbs(dev);
317
318         else{
319                  /* allocate the tx and rx descriptors */
320                 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
321                         sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
322                         &lp->tx_ring_dma_addr)) == NULL)
323
324                         goto err_no_mem;
325
326                 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
327                         sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
328                         &lp->rx_ring_dma_addr)) == NULL)
329
330                         goto err_free_tx_ring;
331
332         }
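        /* The descriptor rings live in coherent DMA memory obtained from
           pci_alloc_consistent(); they are released again in
           amd8111e_free_ring() or in the error paths below. */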
333         /* Set new receive buff size */
334         amd8111e_set_rx_buff_len(dev);
335
336         /* Allocating receive  skbs */
337         for (i = 0; i < NUM_RX_BUFFERS; i++) {
338
339                 if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
340                                 /* Release previously allocated skbs */
341                                 for(--i; i >= 0 ;i--)
342                                         dev_kfree_skb(lp->rx_skbuff[i]);
343                                 goto err_free_rx_ring;
344                 }
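                /* 2-byte offset: the 14-byte Ethernet header then leaves the IP header word aligned */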
345                 skb_reserve(lp->rx_skbuff[i],2);
346         }
347         /* Initializing receive descriptors */
348         for (i = 0; i < NUM_RX_BUFFERS; i++) {
349                 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
350                         lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
351
352                 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
353                 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
354                 wmb();
355                 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
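                /* OWN_BIT hands the descriptor to the controller; the wmb()
                   above makes sure the buffer address and count are visible
                   before ownership is transferred. */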
356         }
357
358         /* Initializing transmit descriptors */
359         for (i = 0; i < NUM_TX_RING_DR; i++) {
360                 lp->tx_ring[i].buff_phy_addr = 0;
361                 lp->tx_ring[i].tx_flags = 0;
362                 lp->tx_ring[i].buff_count = 0;
363         }
364
365         return 0;
366
367 err_free_rx_ring:
368
369         pci_free_consistent(lp->pci_dev,
370                 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
371                 lp->rx_ring_dma_addr);
372
373 err_free_tx_ring:
374
375         pci_free_consistent(lp->pci_dev,
376                  sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
377                  lp->tx_ring_dma_addr);
378
379 err_no_mem:
380         return -ENOMEM;
381 }
382 /* This function will set the interrupt coalescing according to the input arguments */
383 static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
384 {
385         unsigned int timeout;
386         unsigned int event_count;
387
388         struct amd8111e_priv *lp = netdev_priv(dev);
389         void __iomem *mmio = lp->mmio;
390         struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
391
392
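        /* Coalescing is programmed through the delayed-interrupt registers:
           the writes below put the event count in bits 16+ and the timeout
           (in DELAY_TIMER_CONV units) in the low bits of DLY_INT_A (receive)
           or DLY_INT_B (transmit).  ENABLE_COAL also arms the soft timer
           (STVAL) whose STINT interrupt lets the driver re-tune these
           values periodically. */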
393         switch(cmod)
394         {
395                 case RX_INTR_COAL :
396                         timeout = coal_conf->rx_timeout;
397                         event_count = coal_conf->rx_event_count;
398                         if( timeout > MAX_TIMEOUT ||
399                                         event_count > MAX_EVENT_COUNT )
400                         return -EINVAL;
401
402                         timeout = timeout * DELAY_TIMER_CONV;
403                         writel(VAL0|STINTEN, mmio+INTEN0);
404                         writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
405                                                         mmio+DLY_INT_A);
406                         break;
407
408                 case TX_INTR_COAL :
409                         timeout = coal_conf->tx_timeout;
410                         event_count = coal_conf->tx_event_count;
411                         if( timeout > MAX_TIMEOUT ||
412                                         event_count > MAX_EVENT_COUNT )
413                         return -EINVAL;
414
415
416                         timeout = timeout * DELAY_TIMER_CONV;
417                         writel(VAL0|STINTEN,mmio+INTEN0);
418                         writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
419                                                          mmio+DLY_INT_B);
420                         break;
421
422                 case DISABLE_COAL:
423                         writel(0,mmio+STVAL);
424                         writel(STINTEN, mmio+INTEN0);
425                         writel(0, mmio +DLY_INT_B);
426                         writel(0, mmio+DLY_INT_A);
427                         break;
428                  case ENABLE_COAL:
429                        /* Start the timer */
430                         writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /*  0.5 sec */
431                         writel(VAL0|STINTEN, mmio+INTEN0);
432                         break;
433                 default:
434                         break;
435
436    }
437         return 0;
438
439 }
440
441 /*
442 This function initializes the device registers  and starts the device.
443 */
444 static int amd8111e_restart(struct net_device *dev)
445 {
446         struct amd8111e_priv *lp = netdev_priv(dev);
447         void __iomem *mmio = lp->mmio;
448         int i,reg_val;
449
450         /* stop the chip */
451          writel(RUN, mmio + CMD0);
452
453         if(amd8111e_init_ring(dev))
454                 return -ENOMEM;
455
456         /* enable the port manager and set auto negotiation always */
457         writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
458         writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
459
460         amd8111e_set_ext_phy(dev);
461
462         /* set control registers */
463         reg_val = readl(mmio + CTRL1);
464         reg_val &= ~XMTSP_MASK;
465         writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
466
467         /* enable interrupt */
468         writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
469                 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
470                 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
471
472         writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
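        /* The VAL0..VAL3 bits appear to act as per-byte-group write enables
           for these command/interrupt registers, so only the flag bits named
           alongside them are actually modified. */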
473
474         /* initialize tx and rx ring base addresses */
475         writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
476         writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
477
478         writew((u16)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
479         writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
480
481         /* set default IPG to 96 */
482         writew((u32)DEFAULT_IPG,mmio+IPG);
483         writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
484
485         if(lp->options & OPTION_JUMBO_ENABLE){
486                 writel((u32)VAL2|JUMBO, mmio + CMD3);
487                 /* Reset REX_UFLO */
488                 writel( REX_UFLO, mmio + CMD2);
489                 /* Should not set REX_UFLO for jumbo frames */
490                 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
491         }else{
492                 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
493                 writel((u32)JUMBO, mmio + CMD3);
494         }
495
496 #if AMD8111E_VLAN_TAG_USED
497         writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
498 #endif
499         writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
500
501         /* Setting the MAC address to the device */
502         for(i = 0; i < ETH_ADDR_LEN; i++)
503                 writeb( dev->dev_addr[i], mmio + PADR + i );
504
505         /* Enable interrupt coalesce */
506         if(lp->options & OPTION_INTR_COAL_ENABLE){
507                 printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
508                                                                 dev->name);
509                 amd8111e_set_coalesce(dev,ENABLE_COAL);
510         }
511
512         /* set RUN bit to start the chip */
513         writel(VAL2 | RDMD0, mmio + CMD0);
514         writel(VAL0 | INTREN | RUN, mmio + CMD0);
515
516         /* To avoid PCI posting bug */
517         readl(mmio+CMD0);
518         return 0;
519 }
520 /*
521 This function clears the necessary device registers.
522 */
523 static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
524 {
525         unsigned int reg_val;
526         unsigned int logic_filter[2] ={0,};
527         void __iomem *mmio = lp->mmio;
528
529
530         /* stop the chip */
531         writel(RUN, mmio + CMD0);
532
533         /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
534         writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
535
536         /* Clear RCV_RING_BASE_ADDR */
537         writel(0, mmio + RCV_RING_BASE_ADDR0);
538
539         /* Clear XMT_RING_BASE_ADDR */
540         writel(0, mmio + XMT_RING_BASE_ADDR0);
541         writel(0, mmio + XMT_RING_BASE_ADDR1);
542         writel(0, mmio + XMT_RING_BASE_ADDR2);
543         writel(0, mmio + XMT_RING_BASE_ADDR3);
544
545         /* Clear CMD0  */
546         writel(CMD0_CLEAR,mmio + CMD0);
547
548         /* Clear CMD2 */
549         writel(CMD2_CLEAR, mmio +CMD2);
550
551         /* Clear CMD7 */
552         writel(CMD7_CLEAR , mmio + CMD7);
553
554         /* Clear DLY_INT_A and DLY_INT_B */
555         writel(0x0, mmio + DLY_INT_A);
556         writel(0x0, mmio + DLY_INT_B);
557
558         /* Clear FLOW_CONTROL */
559         writel(0x0, mmio + FLOW_CONTROL);
560
561         /* Clear INT0  write 1 to clear register */
562         reg_val = readl(mmio + INT0);
563         writel(reg_val, mmio + INT0);
564
565         /* Clear STVAL */
566         writel(0x0, mmio + STVAL);
567
568         /* Clear INTEN0 */
569         writel( INTEN0_CLEAR, mmio + INTEN0);
570
571         /* Clear LADRF */
572         writel(0x0 , mmio + LADRF);
573
574         /* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
575         writel( 0x80010,mmio + SRAM_SIZE);
576
577         /* Clear RCV_RING0_LEN */
578         writel(0x0, mmio +  RCV_RING_LEN0);
579
580         /* Clear XMT_RING0/1/2/3_LEN */
581         writel(0x0, mmio +  XMT_RING_LEN0);
582         writel(0x0, mmio +  XMT_RING_LEN1);
583         writel(0x0, mmio +  XMT_RING_LEN2);
584         writel(0x0, mmio +  XMT_RING_LEN3);
585
586         /* Clear XMT_RING_LIMIT */
587         writel(0x0, mmio + XMT_RING_LIMIT);
588
589         /* Clear MIB */
590         writew(MIB_CLEAR, mmio + MIB_ADDR);
591
592         /* Clear LARF */
593         amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
594
595         /* SRAM_SIZE register */
596         reg_val = readl(mmio + SRAM_SIZE);
597
598         if(lp->options & OPTION_JUMBO_ENABLE)
599                 writel( VAL2|JUMBO, mmio + CMD3);
600 #if AMD8111E_VLAN_TAG_USED
601         writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
602 #endif
603         /* Set default value to CTRL1 Register */
604         writel(CTRL1_DEFAULT, mmio + CTRL1);
605
606         /* To avoid PCI posting bug */
607         readl(mmio + CMD2);
608
609 }
610
611 /*
612 This function disables the interrupt and clears all the pending
613 interrupts in INT0
614  */
615 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
616 {
617         u32 intr0;
618
619         /* Disable interrupt */
620         writel(INTREN, lp->mmio + CMD0);
621
622         /* Clear INT0 */
623         intr0 = readl(lp->mmio + INT0);
624         writel(intr0, lp->mmio + INT0);
625
626         /* To avoid PCI posting bug */
627         readl(lp->mmio + INT0);
628
629 }
630
631 /*
632 This function stops the chip.
633 */
634 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
635 {
636         writel(RUN, lp->mmio + CMD0);
637
638         /* To avoid PCI posting bug */
639         readl(lp->mmio + CMD0);
640 }
641
642 /*
643 This function frees the transmit and receive descriptor rings.
644 */
645 static void amd8111e_free_ring(struct amd8111e_priv* lp)
646 {
647
648         /* Free transmit and receive skbs */
649         amd8111e_free_skbs(lp->amd8111e_net_dev);
650
651         /* Free transmit and receive descriptor rings */
652         if(lp->rx_ring){
653                 pci_free_consistent(lp->pci_dev,
654                         sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
655                         lp->rx_ring, lp->rx_ring_dma_addr);
656                 lp->rx_ring = NULL;
657         }
658
659         if(lp->tx_ring){
660                 pci_free_consistent(lp->pci_dev,
661                         sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
662                         lp->tx_ring, lp->tx_ring_dma_addr);
663
664                 lp->tx_ring = NULL;
665         }
666
667 }
668 #if AMD8111E_VLAN_TAG_USED
669 /*
670 This is the receive indication function for packets with vlan tag.
671 */
672 static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
673 {
674         return vlan_hwaccel_receive_skb(skb, lp->vlgrp,vlan_tag);
675 }
676 #endif
677
678 /*
679 This function will free all the transmit skbs that have actually been transmitted by the device. It checks the ownership of the descriptor before freeing the skb.
680 */
681 static int amd8111e_tx(struct net_device *dev)
682 {
683         struct amd8111e_priv* lp = netdev_priv(dev);
684         int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
685         int status;
686         /* Complete all the transmit packet */
687         while (lp->tx_complete_idx != lp->tx_idx){
688                 tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
689                 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
690
691                 if(status & OWN_BIT)
692                         break;  /* It still hasn't been Txed */
693
694                 lp->tx_ring[tx_index].buff_phy_addr = 0;
695
696                 /* We must free the original skb */
697                 if (lp->tx_skbuff[tx_index]) {
698                         pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
699                                         lp->tx_skbuff[tx_index]->len,
700                                         PCI_DMA_TODEVICE);
701                         dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
702                         lp->tx_skbuff[tx_index] = NULL;
703                         lp->tx_dma_addr[tx_index] = 0;
704                 }
705                 lp->tx_complete_idx++;
706                 /*COAL update tx coalescing parameters */
707                 lp->coal_conf.tx_packets++;
708                 lp->coal_conf.tx_bytes +=
709                         le16_to_cpu(lp->tx_ring[tx_index].buff_count);
710
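                /* Wake the queue once the number of outstanding descriptors
                   (tx_idx - tx_complete_idx) drops below NUM_TX_BUFFERS - 2,
                   i.e. free slots are available again. */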
711                 if (netif_queue_stopped(dev) &&
712                         lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
713                         /* The ring is no longer full, clear tbusy. */
714                         /* lp->tx_full = 0; */
715                         netif_wake_queue (dev);
716                 }
717         }
718         return 0;
719 }
720
721 /* This function handles the driver receive operation in polling mode */
722 static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
723 {
724         struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
725         struct net_device *dev = lp->amd8111e_net_dev;
726         int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
727         void __iomem *mmio = lp->mmio;
728         struct sk_buff *skb,*new_skb;
729         int min_pkt_len, status;
730         unsigned int intr0;
731         int num_rx_pkt = 0;
732         short pkt_len;
733 #if AMD8111E_VLAN_TAG_USED
734         short vtag;
735 #endif
736         int rx_pkt_limit = budget;
737         unsigned long flags;
738
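        /* NAPI poll: drain completed receive descriptors up to the budget,
           refill and hand each one back to the MAC, and only when the ring
           runs empty re-enable receive interrupts (below). */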
739         do{
740                 /* process receive packets until we use the quota*/
741                 /* If we own the next entry, it's a new packet. Send it up. */
742                 while(1) {
743                         status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
744                         if (status & OWN_BIT)
745                                 break;
746
747                         /*
748                          * There is a tricky error noted by John Murphy,
749                          * <murf@perftech.com> to Russ Nelson: Even with
750                          * full-sized buffers it's possible for a
751                          * jabber packet to use two buffers, with only
752                          * the last correctly noting the error.
753                          */
754
755                         if(status & ERR_BIT) {
756                                 /* resetting flags */
757                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
758                                 goto err_next_pkt;
759                         }
760                         /* check for STP and ENP */
761                         if(!((status & STP_BIT) && (status & ENP_BIT))){
762                                 /* resetting flags */
763                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
764                                 goto err_next_pkt;
765                         }
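                        /* msg_count from the descriptor appears to include
                           the 4-byte FCS, hence the -4 below */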
766                         pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
767
768 #if AMD8111E_VLAN_TAG_USED
769                         vtag = status & TT_MASK;
770                         /*MAC will strip vlan tag*/
771                         if(lp->vlgrp != NULL && vtag !=0)
772                                 min_pkt_len =MIN_PKT_LEN - 4;
773                         else
774 #endif
775                                 min_pkt_len =MIN_PKT_LEN;
776
777                         if (pkt_len < min_pkt_len) {
778                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
779                                 lp->drv_rx_errors++;
780                                 goto err_next_pkt;
781                         }
782                         if(--rx_pkt_limit < 0)
783                                 goto rx_not_empty;
784                         if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
785                                 /* if allocation fails,
786                                    ignore that pkt and go to the next one */
787                                 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
788                                 lp->drv_rx_errors++;
789                                 goto err_next_pkt;
790                         }
791
792                         skb_reserve(new_skb, 2);
793                         skb = lp->rx_skbuff[rx_index];
794                         pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
795                                          lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
796                         skb_put(skb, pkt_len);
797                         lp->rx_skbuff[rx_index] = new_skb;
798                         lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
799                                                                    new_skb->data,
800                                                                    lp->rx_buff_len-2,
801                                                                    PCI_DMA_FROMDEVICE);
802
803                         skb->protocol = eth_type_trans(skb, dev);
804
805 #if AMD8111E_VLAN_TAG_USED
806                         if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
807                                 amd8111e_vlan_rx(lp, skb,
808                                          le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info));
809                         } else
810 #endif
811                                 netif_receive_skb(skb);
812                         /*COAL update rx coalescing parameters*/
813                         lp->coal_conf.rx_packets++;
814                         lp->coal_conf.rx_bytes += pkt_len;
815                         num_rx_pkt++;
816                         dev->last_rx = jiffies;
817
818                 err_next_pkt:
819                         lp->rx_ring[rx_index].buff_phy_addr
820                                 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
821                         lp->rx_ring[rx_index].buff_count =
822                                 cpu_to_le16(lp->rx_buff_len-2);
823                         wmb();
824                         lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
825                         rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
826                 }
827                 /* Check the interrupt status register for more packets in the
828                    meantime. Process them since we have not used up our quota. */
829
830                 intr0 = readl(mmio + INT0);
831                 /*Ack receive packets */
832                 writel(intr0 & RINT0,mmio + INT0);
833
834         } while(intr0 & RINT0);
835
836         if (rx_pkt_limit > 0) {
837                 /* Receive descriptor is empty now */
838                 spin_lock_irqsave(&lp->lock, flags);
839                 __netif_rx_complete(dev, napi);
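                /* Re-enable receive interrupts and set RDMD0 to prompt the
                   MAC to poll the (now refilled) receive descriptors again. */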
840                 writel(VAL0|RINTEN0, mmio + INTEN0);
841                 writel(VAL2 | RDMD0, mmio + CMD0);
842                 spin_unlock_irqrestore(&lp->lock, flags);
843         }
844
845 rx_not_empty:
846         return num_rx_pkt;
847 }
848
849 /*
850 This function will indicate the link status to the kernel.
851 */
852 static int amd8111e_link_change(struct net_device* dev)
853 {
854         struct amd8111e_priv *lp = netdev_priv(dev);
855         int status0,speed;
856
857         /* read the link change */
858         status0 = readl(lp->mmio + STAT0);
859
860         if(status0 & LINK_STATS){
861                 if(status0 & AUTONEG_COMPLETE)
862                         lp->link_config.autoneg = AUTONEG_ENABLE;
863                 else
864                         lp->link_config.autoneg = AUTONEG_DISABLE;
865
866                 if(status0 & FULL_DPLX)
867                         lp->link_config.duplex = DUPLEX_FULL;
868                 else
869                         lp->link_config.duplex = DUPLEX_HALF;
870                 speed = (status0 & SPEED_MASK) >> 7;
871                 if(speed == PHY_SPEED_10)
872                         lp->link_config.speed = SPEED_10;
873                 else if(speed == PHY_SPEED_100)
874                         lp->link_config.speed = SPEED_100;
875
876                 printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
877                        (lp->link_config.speed == SPEED_100) ? "100": "10",
878                        (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
879                 netif_carrier_on(dev);
880         }
881         else{
882                 lp->link_config.speed = SPEED_INVALID;
883                 lp->link_config.duplex = DUPLEX_INVALID;
884                 lp->link_config.autoneg = AUTONEG_INVALID;
885                 printk(KERN_INFO "%s: Link is Down.\n",dev->name);
886                 netif_carrier_off(dev);
887         }
888
889         return 0;
890 }
891 /*
892 This function reads the mib counters.
893 */
894 static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
895 {
896         unsigned int  status;
897         unsigned  int data;
898         unsigned int repeat = REPEAT_CNT;
899
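        /* MIB access is indirect: write the counter index with MIB_RD_CMD to
           MIB_ADDR, wait for MIB_CMD_ACTIVE to clear, then read the value
           from MIB_DATA. */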
900         writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
901         do {
902                 status = readw(mmio + MIB_ADDR);
903                 udelay(2);      /* controller takes MAX 2 us to get mib data */
904         }
905         while (--repeat && (status & MIB_CMD_ACTIVE));
906
907         data = readl(mmio + MIB_DATA);
908         return data;
909 }
910
911 /*
912 This function reads the mib registers and returns the hardware statistics. It updates the internal driver statistics with the new values.
913 */
914 static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
915 {
916         struct amd8111e_priv *lp = netdev_priv(dev);
917         void __iomem *mmio = lp->mmio;
918         unsigned long flags;
919         /* struct net_device_stats *prev_stats = &lp->prev_stats; */
920         struct net_device_stats* new_stats = &lp->stats;
921
922         if(!lp->opened)
923                 return &lp->stats;
924         spin_lock_irqsave (&lp->lock, flags);
925
926         /* stats.rx_packets */
927         new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
928                                 amd8111e_read_mib(mmio, rcv_multicast_pkts)+
929                                 amd8111e_read_mib(mmio, rcv_unicast_pkts);
930
931         /* stats.tx_packets */
932         new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
933
934         /*stats.rx_bytes */
935         new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
936
937         /* stats.tx_bytes */
938         new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
939
940         /* stats.rx_errors */
941         /* hw errors + errors driver reported */
942         new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
943                                 amd8111e_read_mib(mmio, rcv_fragments)+
944                                 amd8111e_read_mib(mmio, rcv_jabbers)+
945                                 amd8111e_read_mib(mmio, rcv_alignment_errors)+
946                                 amd8111e_read_mib(mmio, rcv_fcs_errors)+
947                                 amd8111e_read_mib(mmio, rcv_miss_pkts)+
948                                 lp->drv_rx_errors;
949
950         /* stats.tx_errors */
951         new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
952
953         /* stats.rx_dropped*/
954         new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
955
956         /* stats.tx_dropped*/
957         new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);
958
959         /* stats.multicast*/
960         new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
961
962         /* stats.collisions*/
963         new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
964
965         /* stats.rx_length_errors*/
966         new_stats->rx_length_errors =
967                 amd8111e_read_mib(mmio, rcv_undersize_pkts)+
968                 amd8111e_read_mib(mmio, rcv_oversize_pkts);
969
970         /* stats.rx_over_errors*/
971         new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
972
973         /* stats.rx_crc_errors*/
974         new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
975
976         /* stats.rx_frame_errors*/
977         new_stats->rx_frame_errors =
978                 amd8111e_read_mib(mmio, rcv_alignment_errors);
979
980         /* stats.rx_fifo_errors */
981         new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
982
983         /* stats.rx_missed_errors */
984         new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
985
986         /* stats.tx_aborted_errors*/
987         new_stats->tx_aborted_errors =
988                 amd8111e_read_mib(mmio, xmt_excessive_collision);
989
990         /* stats.tx_carrier_errors*/
991         new_stats->tx_carrier_errors =
992                 amd8111e_read_mib(mmio, xmt_loss_carrier);
993
994         /* stats.tx_fifo_errors*/
995         new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
996
997         /* stats.tx_window_errors*/
998         new_stats->tx_window_errors =
999                 amd8111e_read_mib(mmio, xmt_late_collision);
1000
1001         /* Reset the mibs for collecting new statistics */
1002         /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
1003
1004         spin_unlock_irqrestore (&lp->lock, flags);
1005
1006         return new_stats;
1007 }
1008 /* This function recalculates the interrupt coalescing mode on every interrupt
1009 according to the data rate and the packet rate.
1010 */
1011 static int amd8111e_calc_coalesce(struct net_device *dev)
1012 {
1013         struct amd8111e_priv *lp = netdev_priv(dev);
1014         struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
1015         int tx_pkt_rate;
1016         int rx_pkt_rate;
1017         int tx_data_rate;
1018         int rx_data_rate;
1019         int rx_pkt_size;
1020         int tx_pkt_size;
1021
1022         tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
1023         coal_conf->tx_prev_packets =  coal_conf->tx_packets;
1024
1025         tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
1026         coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
1027
1028         rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1029         coal_conf->rx_prev_packets =  coal_conf->rx_packets;
1030
1031         rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1032         coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
1033
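        /* The deltas above are per soft-timer period (about 0.5 s, see
           SOFT_TIMER_FREQ in amd8111e_set_coalesce).  Below, the average
           packet size (data rate / packet rate) selects the coalescing
           level; low packet rates switch coalescing off entirely. */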
1034         if(rx_pkt_rate < 800){
1035                 if(coal_conf->rx_coal_type != NO_COALESCE){
1036
1037                         coal_conf->rx_timeout = 0x0;
1038                         coal_conf->rx_event_count = 0;
1039                         amd8111e_set_coalesce(dev,RX_INTR_COAL);
1040                         coal_conf->rx_coal_type = NO_COALESCE;
1041                 }
1042         }
1043         else{
1044
1045                 rx_pkt_size = rx_data_rate/rx_pkt_rate;
1046                 if (rx_pkt_size < 128){
1047                         if(coal_conf->rx_coal_type != NO_COALESCE){
1048
1049                                 coal_conf->rx_timeout = 0;
1050                                 coal_conf->rx_event_count = 0;
1051                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1052                                 coal_conf->rx_coal_type = NO_COALESCE;
1053                         }
1054
1055                 }
1056                 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1057
1058                         if(coal_conf->rx_coal_type !=  LOW_COALESCE){
1059                                 coal_conf->rx_timeout = 1;
1060                                 coal_conf->rx_event_count = 4;
1061                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1062                                 coal_conf->rx_coal_type = LOW_COALESCE;
1063                         }
1064                 }
1065                 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1066
1067                         if(coal_conf->rx_coal_type !=  MEDIUM_COALESCE){
1068                                 coal_conf->rx_timeout = 1;
1069                                 coal_conf->rx_event_count = 4;
1070                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1071                                 coal_conf->rx_coal_type = MEDIUM_COALESCE;
1072                         }
1073
1074                 }
1075                 else if(rx_pkt_size >= 1024){
1076                         if(coal_conf->rx_coal_type !=  HIGH_COALESCE){
1077                                 coal_conf->rx_timeout = 2;
1078                                 coal_conf->rx_event_count = 3;
1079                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1080                                 coal_conf->rx_coal_type = HIGH_COALESCE;
1081                         }
1082                 }
1083         }
1084         /* NOW FOR TX INTR COALESC */
1085         if(tx_pkt_rate < 800){
1086                 if(coal_conf->tx_coal_type != NO_COALESCE){
1087
1088                         coal_conf->tx_timeout = 0x0;
1089                         coal_conf->tx_event_count = 0;
1090                         amd8111e_set_coalesce(dev,TX_INTR_COAL);
1091                         coal_conf->tx_coal_type = NO_COALESCE;
1092                 }
1093         }
1094         else{
1095
1096                 tx_pkt_size = tx_data_rate/tx_pkt_rate;
1097                 if (tx_pkt_size < 128){
1098
1099                         if(coal_conf->tx_coal_type != NO_COALESCE){
1100
1101                                 coal_conf->tx_timeout = 0;
1102                                 coal_conf->tx_event_count = 0;
1103                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1104                                 coal_conf->tx_coal_type = NO_COALESCE;
1105                         }
1106
1107                 }
1108                 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1109
1110                         if(coal_conf->tx_coal_type !=  LOW_COALESCE){
1111                                 coal_conf->tx_timeout = 1;
1112                                 coal_conf->tx_event_count = 2;
1113                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1114                                 coal_conf->tx_coal_type = LOW_COALESCE;
1115
1116                         }
1117                 }
1118                 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1119
1120                         if(coal_conf->tx_coal_type !=  MEDIUM_COALESCE){
1121                                 coal_conf->tx_timeout = 2;
1122                                 coal_conf->tx_event_count = 5;
1123                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1124                                 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1125                         }
1126
1127                 }
1128                 else if(tx_pkt_size >= 1024){
1129                         if (tx_pkt_size >= 1024){
1130                                 if(coal_conf->tx_coal_type !=  HIGH_COALESCE){
1131                                         coal_conf->tx_timeout = 4;
1132                                         coal_conf->tx_event_count = 8;
1133                                         amd8111e_set_coalesce(dev,TX_INTR_COAL);
1134                                         coal_conf->tx_coal_type = HIGH_COALESCE;
1135                                 }
1136                         }
1137                 }
1138         }
1139         return 0;
1140
1141 }
1142 /*
1143 This is the device interrupt handler. It handles transmit, receive, link change and hardware timer interrupts.
1144 */
1145 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1146 {
1147
1148         struct net_device * dev = (struct net_device *) dev_id;
1149         struct amd8111e_priv *lp = netdev_priv(dev);
1150         void __iomem *mmio = lp->mmio;
1151         unsigned int intr0, intren0;
1152         unsigned int handled = 1;
1153
1154         if(unlikely(dev == NULL))
1155                 return IRQ_NONE;
1156
1157         spin_lock(&lp->lock);
1158
1159         /* disabling interrupt */
1160         writel(INTREN, mmio + CMD0);
1161
1162         /* Read interrupt status */
1163         intr0 = readl(mmio + INT0);
1164         intren0 = readl(mmio + INTEN0);
1165
1166         /* Process all the INT events until the INTR bit is clear. */
1167
1168         if (!(intr0 & INTR)){
1169                 handled = 0;
1170                 goto err_no_interrupt;
1171         }
1172
1173         /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1174         writel(intr0, mmio + INT0);
1175
1176         /* Check if Receive Interrupt has occurred. */
1177         if (intr0 & RINT0) {
1178                 if (netif_rx_schedule_prep(dev, &lp->napi)) {
1179                         /* Disable receive interrupts */
1180                         writel(RINTEN0, mmio + INTEN0);
1181                         /* Schedule a polling routine */
1182                         __netif_rx_schedule(dev, &lp->napi);
1183                 } else if (intren0 & RINTEN0) {
1184                         printk("************Driver bug! "
1185                                 "interrupt while in poll\n");
1186                         /* Fix by disable receive interrupts */
1187                         writel(RINTEN0, mmio + INTEN0);
1188                 }
1189         }
1190
1191         /* Check if  Transmit Interrupt has occurred. */
1192         if (intr0 & TINT0)
1193                 amd8111e_tx(dev);
1194
1195         /* Check if  Link Change Interrupt has occurred. */
1196         if (intr0 & LCINT)
1197                 amd8111e_link_change(dev);
1198
1199         /* Check if Hardware Timer Interrupt has occurred. */
1200         if (intr0 & STINT)
1201                 amd8111e_calc_coalesce(dev);
1202
1203 err_no_interrupt:
1204         writel( VAL0 | INTREN,mmio + CMD0);
1205
1206         spin_unlock(&lp->lock);
1207
1208         return IRQ_RETVAL(handled);
1209 }
1210
1211 #ifdef CONFIG_NET_POLL_CONTROLLER
1212 static void amd8111e_poll(struct net_device *dev)
1213 {
1214         unsigned long flags;
1215         local_irq_save(flags);
1216         amd8111e_interrupt(0, dev);
1217         local_irq_restore(flags);
1218 }
1219 #endif
1220
1221
1222 /*
1223 This function closes the network interface and updates the statistics so that the most recent statistics will be available after the interface is down.
1224 */
1225 static int amd8111e_close(struct net_device * dev)
1226 {
1227         struct amd8111e_priv *lp = netdev_priv(dev);
1228         netif_stop_queue(dev);
1229
1230         napi_disable(&lp->napi);
1231
1232         spin_lock_irq(&lp->lock);
1233
1234         amd8111e_disable_interrupt(lp);
1235         amd8111e_stop_chip(lp);
1236         amd8111e_free_ring(lp);
1237
1238         netif_carrier_off(lp->amd8111e_net_dev);
1239
1240         /* Delete ipg timer */
1241         if(lp->options & OPTION_DYN_IPG_ENABLE)
1242                 del_timer_sync(&lp->ipg_data.ipg_timer);
1243
1244         spin_unlock_irq(&lp->lock);
1245         free_irq(dev->irq, dev);
1246
1247         /* Update the statistics before closing */
1248         amd8111e_get_stats(dev);
1249         lp->opened = 0;
1250         return 0;
1251 }
1252 /* This function opens a new interface. It requests the irq for the device, initializes the device, buffers and descriptors, and starts the device.
1253 */
1254 static int amd8111e_open(struct net_device * dev )
1255 {
1256         struct amd8111e_priv *lp = netdev_priv(dev);
1257
1258         if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
1259                                          dev->name, dev))
1260                 return -EAGAIN;
1261
1262         napi_enable(&lp->napi);
1263
1264         spin_lock_irq(&lp->lock);
1265
1266         amd8111e_init_hw_default(lp);
1267
1268         if(amd8111e_restart(dev)){
1269                 spin_unlock_irq(&lp->lock);
1270                 napi_disable(&lp->napi);
1271                 if (dev->irq)
1272                         free_irq(dev->irq, dev);
1273                 return -ENOMEM;
1274         }
1275         /* Start ipg timer */
1276         if(lp->options & OPTION_DYN_IPG_ENABLE){
1277                 add_timer(&lp->ipg_data.ipg_timer);
1278                 printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
1279         }
1280
1281         lp->opened = 1;
1282
1283         spin_unlock_irq(&lp->lock);
1284
1285         netif_start_queue(dev);
1286
1287         return 0;
1288 }
1289 /*
1290 This function checks if there are any transmit descriptors available to queue more packets.
1291 */
1292 static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1293 {
1294         int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1295         if (lp->tx_skbuff[tx_index])
1296                 return -1;
1297         else
1298                 return 0;
1299
1300 }
1301 /*
1302 This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with the buffer physical address, byte count, and ownership to the hardware.
1303 */
1304
1305 static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
1306 {
1307         struct amd8111e_priv *lp = netdev_priv(dev);
1308         int tx_index;
1309         unsigned long flags;
1310
1311         spin_lock_irqsave(&lp->lock, flags);
1312
1313         tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1314
1315         lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1316
1317         lp->tx_skbuff[tx_index] = skb;
1318         lp->tx_ring[tx_index].tx_flags = 0;
1319
1320 #if AMD8111E_VLAN_TAG_USED
1321         if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
1322                 lp->tx_ring[tx_index].tag_ctrl_cmd |=
1323                                 cpu_to_le16(TCC_VLAN_INSERT);
1324                 lp->tx_ring[tx_index].tag_ctrl_info =
1325                                 cpu_to_le16(vlan_tx_tag_get(skb));
1326
1327         }
1328 #endif
1329         lp->tx_dma_addr[tx_index] =
1330             pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1331         lp->tx_ring[tx_index].buff_phy_addr =
1332             cpu_to_le32(lp->tx_dma_addr[tx_index]);
1333
1334         /*  Set FCS and LTINT bits */
1335         wmb();
1336         lp->tx_ring[tx_index].tx_flags |=
1337             cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
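            /* Single-buffer frame (STP_BIT + ENP_BIT); the MAC appends the
               FCS (ADD_FCS_BIT) and LTINT_BIT presumably requests a transmit
               interrupt for this frame.  OWN_BIT hands the descriptor to the
               hardware, ordered after the wmb() above. */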
1338
1339         lp->tx_idx++;
1340
1341         /* Trigger an immediate send poll. */
1342         writel( VAL1 | TDMD0, lp->mmio + CMD0);
1343         writel( VAL2 | RDMD0,lp->mmio + CMD0);
1344
1345         dev->trans_start = jiffies;
1346
1347         if(amd8111e_tx_queue_avail(lp) < 0){
1348                 netif_stop_queue(dev);
1349         }
1350         spin_unlock_irqrestore(&lp->lock, flags);
1351         return 0;
1352 }
1353 /*
1354 This function reads the relevant memory mapped registers of the device into the given buffer.
1355 */
1356 static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1357 {
1358         void __iomem *mmio = lp->mmio;
1359         /* Read only necessary registers */
1360         buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1361         buf[1] = readl(mmio + XMT_RING_LEN0);
1362         buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1363         buf[3] = readl(mmio + RCV_RING_LEN0);
1364         buf[4] = readl(mmio + CMD0);
1365         buf[5] = readl(mmio + CMD2);
1366         buf[6] = readl(mmio + CMD3);
1367         buf[7] = readl(mmio + CMD7);
1368         buf[8] = readl(mmio + INT0);
1369         buf[9] = readl(mmio + INTEN0);
1370         buf[10] = readl(mmio + LADRF);
1371         buf[11] = readl(mmio + LADRF+4);
1372         buf[12] = readl(mmio + STAT0);
1373 }
1374
1375
1376 /*
1377 This function sets promiscuous mode, all-multi mode or the multicast address
1378 list to the device.
1379 */
1380 static void amd8111e_set_multicast_list(struct net_device *dev)
1381 {
1382         struct dev_mc_list* mc_ptr;
1383         struct amd8111e_priv *lp = netdev_priv(dev);
1384         u32 mc_filter[2] ;
1385         int i,bit_num;
1386         if(dev->flags & IFF_PROMISC){
1387                 writel( VAL2 | PROM, lp->mmio + CMD2);
1388                 return;
1389         }
1390         else
1391                 writel( PROM, lp->mmio + CMD2);
1392         if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
1393                 /* get all multicast packet */
1394                 mc_filter[1] = mc_filter[0] = 0xffffffff;
1395                 lp->mc_list = dev->mc_list;
1396                 lp->options |= OPTION_MULTICAST_ENABLE;
1397                 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1398                 return;
1399         }
1400         if( dev->mc_count == 0 ){
1401                 /* get only own packets */
1402                 mc_filter[1] = mc_filter[0] = 0;
1403                 lp->mc_list = NULL;
1404                 lp->options &= ~OPTION_MULTICAST_ENABLE;
1405                 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1406                 /* disable promiscuous mode */
1407                 writel(PROM, lp->mmio + CMD2);
1408                 return;
1409         }
1410         /* load all the multicast addresses in the logic filter */
1411         lp->options |= OPTION_MULTICAST_ENABLE;
1412         lp->mc_list = dev->mc_list;
1413         mc_filter[1] = mc_filter[0] = 0;
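        /* Each multicast address is hashed with the little-endian CRC-32;
           the top six bits of the CRC select one of the 64 logical address
           filter (LADRF) bits set in the loop below. */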
1414         for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
1415                      i++, mc_ptr = mc_ptr->next) {
1416                 bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
1417                 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1418         }
1419         amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
1420
1421         /* To eliminate PCI posting bug */
1422         readl(lp->mmio + CMD2);
1423
1424 }
1425
1426 static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
1427 {
1428         struct amd8111e_priv *lp = netdev_priv(dev);
1429         struct pci_dev *pci_dev = lp->pci_dev;
1430         strcpy (info->driver, MODULE_NAME);
1431         strcpy (info->version, MODULE_VERS);
1432         sprintf(info->fw_version,"%u",chip_version);
1433         strcpy (info->bus_info, pci_name(pci_dev));
1434 }
1435
1436 static int amd8111e_get_regs_len(struct net_device *dev)
1437 {
1438         return AMD8111E_REG_DUMP_LEN;
1439 }
1440
1441 static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1442 {
1443         struct amd8111e_priv *lp = netdev_priv(dev);
1444         regs->version = 0;
1445         amd8111e_read_regs(lp, buf);
1446 }
1447
1448 static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1449 {
1450         struct amd8111e_priv *lp = netdev_priv(dev);
1451         spin_lock_irq(&lp->lock);
1452         mii_ethtool_gset(&lp->mii_if, ecmd);
1453         spin_unlock_irq(&lp->lock);
1454         return 0;
1455 }
1456
1457 static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1458 {
1459         struct amd8111e_priv *lp = netdev_priv(dev);
1460         int res;
1461         spin_lock_irq(&lp->lock);
1462         res = mii_ethtool_sset(&lp->mii_if, ecmd);
1463         spin_unlock_irq(&lp->lock);
1464         return res;
1465 }
1466
1467 static int amd8111e_nway_reset(struct net_device *dev)
1468 {
1469         struct amd8111e_priv *lp = netdev_priv(dev);
1470         return mii_nway_restart(&lp->mii_if);
1471 }
1472
1473 static u32 amd8111e_get_link(struct net_device *dev)
1474 {
1475         struct amd8111e_priv *lp = netdev_priv(dev);
1476         return mii_link_ok(&lp->mii_if);
1477 }
1478
1479 static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1480 {
1481         struct amd8111e_priv *lp = netdev_priv(dev);
1482         wol_info->supported = WAKE_MAGIC|WAKE_PHY;
1483         if (lp->options & OPTION_WOL_ENABLE)
1484                 wol_info->wolopts = WAKE_MAGIC;
1485 }
1486
1487 static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1488 {
1489         struct amd8111e_priv *lp = netdev_priv(dev);
1490         if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1491                 return -EINVAL;
1492         spin_lock_irq(&lp->lock);
1493         if (wol_info->wolopts & WAKE_MAGIC)
1494                 lp->options |=
1495                         (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1496         else if(wol_info->wolopts & WAKE_PHY)
1497                 lp->options |=
1498                         (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1499         else
1500                 lp->options &= ~OPTION_WOL_ENABLE;
1501         spin_unlock_irq(&lp->lock);
1502         return 0;
1503 }
1504
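/*
ethtool support: driver information, register dump, link settings via the generic MII helpers, autonegotiation restart, link status, and Wake-on-LAN configuration.
*/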
1505 static const struct ethtool_ops ops = {
1506         .get_drvinfo = amd8111e_get_drvinfo,
1507         .get_regs_len = amd8111e_get_regs_len,
1508         .get_regs = amd8111e_get_regs,
1509         .get_settings = amd8111e_get_settings,
1510         .set_settings = amd8111e_set_settings,
1511         .nway_reset = amd8111e_nway_reset,
1512         .get_link = amd8111e_get_link,
1513         .get_wol = amd8111e_get_wol,
1514         .set_wol = amd8111e_set_wol,
1515 };
1516
1517 /*
1518 This function handles the MII ioctls. It reports the external PHY address (SIOCGMIIPHY) and reads or writes external PHY registers (SIOCGMIIREG/SIOCSMIIREG) on behalf of user space.
1519 */
1520
1521 static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1522 {
1523         struct mii_ioctl_data *data = if_mii(ifr);
1524         struct amd8111e_priv *lp = netdev_priv(dev);
1525         int err;
1526         u32 mii_regval;
1527
1528         if (!capable(CAP_NET_ADMIN))
1529                 return -EPERM;
1530
1531         switch(cmd) {
1532         case SIOCGMIIPHY:
1533                 data->phy_id = lp->ext_phy_addr;
1534
1535         /* fallthru */
1536         case SIOCGMIIREG:
1537
1538                 spin_lock_irq(&lp->lock);
1539                 err = amd8111e_read_phy(lp, data->phy_id,
1540                         data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1541                 spin_unlock_irq(&lp->lock);
1542
1543                 data->val_out = mii_regval;
1544                 return err;
1545
1546         case SIOCSMIIREG:
1547
1548                 spin_lock_irq(&lp->lock);
1549                 err = amd8111e_write_phy(lp, data->phy_id,
1550                         data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1551                 spin_unlock_irq(&lp->lock);
1552
1553                 return err;
1554
1555         default:
1556                 /* do nothing */
1557                 break;
1558         }
1559         return -EOPNOTSUPP;
1560 }
1561 static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1562 {
1563         struct amd8111e_priv *lp = netdev_priv(dev);
1564         int i;
1565         struct sockaddr *addr = p;
1566
1567         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1568         spin_lock_irq(&lp->lock);
1569         /* Setting the MAC address to the device */
1570         for(i = 0; i < ETH_ADDR_LEN; i++)
1571                 writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1572
1573         spin_unlock_irq(&lp->lock);
1574
1575         return 0;
1576 }
1577
1578 /*
1579 This function changes the MTU of the device. It restarts the device to reinitialize the descriptors with receive buffers of the new size.
1580 */
1581 static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1582 {
1583         struct amd8111e_priv *lp = netdev_priv(dev);
1584         int err;
1585
1586         if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1587                 return -EINVAL;
1588
1589         if (!netif_running(dev)) {
1590                 /* new_mtu will be used
1591                    when the device starts next time */
1592                 dev->mtu = new_mtu;
1593                 return 0;
1594         }
1595
1596         spin_lock_irq(&lp->lock);
1597
1598         /* stop the chip */
1599         writel(RUN, lp->mmio + CMD0);
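        /* Writing RUN without its VAL qualifier clears the RUN bit, halting
           the controller before amd8111e_restart() rebuilds the rings. */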
1600
1601         dev->mtu = new_mtu;
1602
1603         err = amd8111e_restart(dev);
1604         spin_unlock_irq(&lp->lock);
1605         if(!err)
1606                 netif_start_queue(dev);
1607         return err;
1608 }
1609
1610 #if AMD8111E_VLAN_TAG_USED
1611 static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1612 {
1613         struct  amd8111e_priv *lp = netdev_priv(dev);
1614         spin_lock_irq(&lp->lock);
1615         lp->vlgrp = grp;
1616         spin_unlock_irq(&lp->lock);
1617 }
1618 #endif
1619
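/*
The following two helpers arm the hardware wake sources used by amd8111e_suspend(): Magic Packet wake-up and link-change (PHY) wake-up respectively.
*/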
1620 static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1621 {
1622         writel( VAL1|MPPLBA, lp->mmio + CMD3);
1623         writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1624
1625         /* Read back to flush posted PCI writes */
1626         readl(lp->mmio + CMD7);
1627         return 0;
1628 }
1629
1630 static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1631 {
1632
1633         /* Adapter is already stopped/suspended/interrupt-disabled */
1634         writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1635
1636         /* Read back to flush posted PCI writes */
1637         readl(lp->mmio + CMD7);
1638         return 0;
1639 }
1640 /* This function is called when a packet transmission fails to complete within a reasonable period, on the assumption that an interrupt has been lost or the interface is locked up. It then reinitializes the hardware. */
1641
1642 static void amd8111e_tx_timeout(struct net_device *dev)
1643 {
1644         struct amd8111e_priv* lp = netdev_priv(dev);
1645         int err;
1646
1647         printk(KERN_ERR "%s: transmit timed out, resetting\n",
1648                                                       dev->name);
1649         spin_lock_irq(&lp->lock);
1650         err = amd8111e_restart(dev);
1651         spin_unlock_irq(&lp->lock);
1652         if(!err)
1653                 netif_wake_queue(dev);
1654 }
1655 static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
1656 {
1657         struct net_device *dev = pci_get_drvdata(pci_dev);
1658         struct amd8111e_priv *lp = netdev_priv(dev);
1659
1660         if (!netif_running(dev))
1661                 return 0;
1662
1663         /* disable the interrupt */
1664         spin_lock_irq(&lp->lock);
1665         amd8111e_disable_interrupt(lp);
1666         spin_unlock_irq(&lp->lock);
1667
1668         netif_device_detach(dev);
1669
1670         /* stop chip */
1671         spin_lock_irq(&lp->lock);
1672         if(lp->options & OPTION_DYN_IPG_ENABLE)
1673                 del_timer_sync(&lp->ipg_data.ipg_timer);
1674         amd8111e_stop_chip(lp);
1675         spin_unlock_irq(&lp->lock);
1676
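        /* If WOL was requested, arm the selected wake sources and allow the
           device to generate PCI wake events from D3hot/D3cold; otherwise make
           sure wake-up stays disabled. */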
1677         if(lp->options & OPTION_WOL_ENABLE){
1678                  /* enable wol */
1679                 if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
1680                         amd8111e_enable_magicpkt(lp);
1681                 if(lp->options & OPTION_WAKE_PHY_ENABLE)
1682                         amd8111e_enable_link_change(lp);
1683
1684                 pci_enable_wake(pci_dev, PCI_D3hot, 1);
1685                 pci_enable_wake(pci_dev, PCI_D3cold, 1);
1686
1687         }
1688         else{
1689                 pci_enable_wake(pci_dev, PCI_D3hot, 0);
1690                 pci_enable_wake(pci_dev, PCI_D3cold, 0);
1691         }
1692
1693         pci_save_state(pci_dev);
1694         pci_set_power_state(pci_dev, PCI_D3hot);
1695
1696         return 0;
1697 }
1698 static int amd8111e_resume(struct pci_dev *pci_dev)
1699 {
1700         struct net_device *dev = pci_get_drvdata(pci_dev);
1701         struct amd8111e_priv *lp = netdev_priv(dev);
1702
1703         if (!netif_running(dev))
1704                 return 0;
1705
1706         pci_set_power_state(pci_dev, PCI_D0);
1707         pci_restore_state(pci_dev);
1708
1709         pci_enable_wake(pci_dev, PCI_D3hot, 0);
1710         pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
1711
1712         netif_device_attach(dev);
1713
1714         spin_lock_irq(&lp->lock);
1715         amd8111e_restart(dev);
1716         /* Restart ipg timer */
1717         if(lp->options & OPTION_DYN_IPG_ENABLE)
1718                 mod_timer(&lp->ipg_data.ipg_timer,
1719                                 jiffies + IPG_CONVERGE_JIFFIES);
1720         spin_unlock_irq(&lp->lock);
1721
1722         return 0;
1723 }
1724
1725
1726 static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
1727 {
1728         struct net_device *dev = pci_get_drvdata(pdev);
1729         if (dev) {
1730                 unregister_netdev(dev);
1731                 iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
1732                 free_netdev(dev);
1733                 pci_release_regions(pdev);
1734                 pci_disable_device(pdev);
1735                 pci_set_drvdata(pdev, NULL);
1736         }
1737 }
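/*
This function tunes the inter-packet gap dynamically (half duplex only). It alternates between a convergence state (CSTATE), in which the IPG is stepped from MIN_IPG towards MAX_IPG while the delta of the transmit-collision MIB counter is sampled for each setting, and a stable state (SSTATE), in which the IPG that produced the fewest collisions is kept for IPG_STABLE_TIME timer ticks before the sweep restarts.
*/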
1738 static void amd8111e_config_ipg(struct net_device* dev)
1739 {
1740         struct amd8111e_priv *lp = netdev_priv(dev);
1741         struct ipg_info* ipg_data = &lp->ipg_data;
1742         void __iomem *mmio = lp->mmio;
1743         unsigned int prev_col_cnt = ipg_data->col_cnt;
1744         unsigned int total_col_cnt;
1745         unsigned int tmp_ipg;
1746
1747         if(lp->link_config.duplex == DUPLEX_FULL){
1748                 ipg_data->ipg = DEFAULT_IPG;
1749                 return;
1750         }
1751
1752         if(ipg_data->ipg_state == SSTATE){
1753
1754                 if(ipg_data->timer_tick == IPG_STABLE_TIME){
1755
1756                         ipg_data->timer_tick = 0;
1757                         ipg_data->ipg = MIN_IPG - IPG_STEP;
1758                         ipg_data->current_ipg = MIN_IPG;
1759                         ipg_data->diff_col_cnt = 0xFFFFFFFF;
1760                         ipg_data->ipg_state = CSTATE;
1761                 }
1762                 else
1763                         ipg_data->timer_tick++;
1764         }
1765
1766         if(ipg_data->ipg_state == CSTATE){
1767
1768                 /* Get the current collision count */
1769
1770                 total_col_cnt = ipg_data->col_cnt =
1771                                 amd8111e_read_mib(mmio, xmt_collisions);
1772
1773                 if ((total_col_cnt - prev_col_cnt) <
1774                                 (ipg_data->diff_col_cnt)){
1775
1776                         ipg_data->diff_col_cnt =
1777                                 total_col_cnt - prev_col_cnt ;
1778
1779                         ipg_data->ipg = ipg_data->current_ipg;
1780                 }
1781
1782                 ipg_data->current_ipg += IPG_STEP;
1783
1784                 if (ipg_data->current_ipg <= MAX_IPG)
1785                         tmp_ipg = ipg_data->current_ipg;
1786                 else{
1787                         tmp_ipg = ipg_data->ipg;
1788                         ipg_data->ipg_state = SSTATE;
1789                 }
1790                 writew((u32)tmp_ipg, mmio + IPG);
1791                 writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1792         }
1793          mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
1794         return;
1795
1796 }
1797
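/*
This function scans MII addresses 0x1e down to 0 for an external PHY by reading its ID registers; if none can be read, it falls back to address 1 with a PHY ID of zero.
*/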
1798 static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
1799 {
1800         struct amd8111e_priv *lp = netdev_priv(dev);
1801         int i;
1802
1803         for (i = 0x1e; i >= 0; i--) {
1804                 u32 id1, id2;
1805
1806                 if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1807                         continue;
1808                 if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1809                         continue;
1810                 lp->ext_phy_id = (id1 << 16) | id2;
1811                 lp->ext_phy_addr = i;
1812                 return;
1813         }
1814         lp->ext_phy_id = 0;
1815         lp->ext_phy_addr = 1;
1816 }
1817
1818 static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1819                                   const struct pci_device_id *ent)
1820 {
1821         int err,i,pm_cap;
1822         unsigned long reg_addr,reg_len;
1823         struct amd8111e_priv* lp;
1824         struct net_device* dev;
1825         DECLARE_MAC_BUF(mac);
1826
1827         err = pci_enable_device(pdev);
1828         if(err){
1829                 printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
1830                         "exiting.\n");
1831                 return err;
1832         }
1833
1834         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1835                 printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
1836                        "exiting.\n");
1837                 err = -ENODEV;
1838                 goto err_disable_pdev;
1839         }
1840
1841         err = pci_request_regions(pdev, MODULE_NAME);
1842         if(err){
1843                 printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1844                        "exiting.\n");
1845                 goto err_disable_pdev;
1846         }
1847
1848         pci_set_master(pdev);
1849
1850         /* Find power-management capability. */
1851         if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
1852                 printk(KERN_ERR "amd8111e: No Power Management capability, "
1853                        "exiting.\n");
                err = -ENODEV;  /* make sure a failure code is returned */
1854                 goto err_free_reg;
1855         }
1856
1857         /* Initialize DMA */
1858         if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) < 0) {
1859                 printk(KERN_ERR "amd8111e: DMA not supported, "
1860                        "exiting.\n");
                err = -ENODEV;  /* make sure a failure code is returned */
1861                 goto err_free_reg;
1862         }
1863
1864         reg_addr = pci_resource_start(pdev, 0);
1865         reg_len = pci_resource_len(pdev, 0);
1866
1867         dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1868         if (!dev) {
1869                 printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
1870                 err = -ENOMEM;
1871                 goto err_free_reg;
1872         }
1873
1874         SET_NETDEV_DEV(dev, &pdev->dev);
1875
1876 #if AMD8111E_VLAN_TAG_USED
1877         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
1878         dev->vlan_rx_register =amd8111e_vlan_rx_register;
1879 #endif
1880
1881         lp = netdev_priv(dev);
1882         lp->pci_dev = pdev;
1883         lp->amd8111e_net_dev = dev;
1884         lp->pm_cap = pm_cap;
1885
1886         spin_lock_init(&lp->lock);
1887
1888         lp->mmio = ioremap(reg_addr, reg_len);
1889         if (!lp->mmio) {
1890                 printk(KERN_ERR "amd8111e: Cannot map device registers, "
1891                        "exiting.\n");
1892                 err = -ENOMEM;
1893                 goto err_free_dev;
1894         }
1895
1896         /* Initializing MAC address */
1897         for(i = 0; i < ETH_ADDR_LEN; i++)
1898                 dev->dev_addr[i] = readb(lp->mmio + PADR + i);
1899
1900         /* Setting user defined parameters */
1901         lp->ext_phy_option = speed_duplex[card_idx];
1902         if(coalesce[card_idx])
1903                 lp->options |= OPTION_INTR_COAL_ENABLE;
1904         if(dynamic_ipg[card_idx++])
1905                 lp->options |= OPTION_DYN_IPG_ENABLE;
1906
1907         /* Initialize driver entry points */
1908         dev->open = amd8111e_open;
1909         dev->hard_start_xmit = amd8111e_start_xmit;
1910         dev->stop = amd8111e_close;
1911         dev->get_stats = amd8111e_get_stats;
1912         dev->set_multicast_list = amd8111e_set_multicast_list;
1913         dev->set_mac_address = amd8111e_set_mac_address;
1914         dev->do_ioctl = amd8111e_ioctl;
1915         dev->change_mtu = amd8111e_change_mtu;
1916         SET_ETHTOOL_OPS(dev, &ops);
1917         dev->irq =pdev->irq;
1918         dev->tx_timeout = amd8111e_tx_timeout;
1919         dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1920         netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1921 #ifdef CONFIG_NET_POLL_CONTROLLER
1922         dev->poll_controller = amd8111e_poll;
1923 #endif
1924
1929         /* Probe the external PHY */
1930         amd8111e_probe_ext_phy(dev);
1931
1932         /* setting mii default values */
1933         lp->mii_if.dev = dev;
1934         lp->mii_if.mdio_read = amd8111e_mdio_read;
1935         lp->mii_if.mdio_write = amd8111e_mdio_write;
1936         lp->mii_if.phy_id = lp->ext_phy_addr;
1937
1938         /* Set receive buffer length and set jumbo option*/
1939         amd8111e_set_rx_buff_len(dev);
1940
1941
1942         err = register_netdev(dev);
1943         if (err) {
1944                 printk(KERN_ERR "amd8111e: Cannot register net device, "
1945                        "exiting.\n");
1946                 goto err_iounmap;
1947         }
1948
1949         pci_set_drvdata(pdev, dev);
1950
1951         /* Initialize software ipg timer */
1952         if(lp->options & OPTION_DYN_IPG_ENABLE){
1953                 init_timer(&lp->ipg_data.ipg_timer);
1954                 lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1955                 lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1956                 lp->ipg_data.ipg_timer.expires = jiffies +
1957                                                  IPG_CONVERGE_JIFFIES;
1958                 lp->ipg_data.ipg = DEFAULT_IPG;
1959                 lp->ipg_data.ipg_state = CSTATE;
1960         }
1961
1962         /*  display driver and device information */
1963
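        /* The chip revision is carried in the top nibble of the CHIPID register. */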
1964         chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1965         printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
1966                dev->name,MODULE_VERS);
1967         printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %s\n",
1968                dev->name, chip_version, print_mac(mac, dev->dev_addr));
1969         if (lp->ext_phy_id)
1970                 printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
1971                        dev->name, lp->ext_phy_id, lp->ext_phy_addr);
1972         else
1973                 printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
1974                        dev->name);
1975         return 0;
1976 err_iounmap:
1977         iounmap(lp->mmio);
1978
1979 err_free_dev:
1980         free_netdev(dev);
1981
1982 err_free_reg:
1983         pci_release_regions(pdev);
1984
1985 err_disable_pdev:
1986         pci_disable_device(pdev);
1987         pci_set_drvdata(pdev, NULL);
1988         return err;
1989
1990 }
1991
1992 static struct pci_driver amd8111e_driver = {
1993         .name           = MODULE_NAME,
1994         .id_table       = amd8111e_pci_tbl,
1995         .probe          = amd8111e_probe_one,
1996         .remove         = __devexit_p(amd8111e_remove_one),
1997         .suspend        = amd8111e_suspend,
1998         .resume         = amd8111e_resume
1999 };
2000
2001 static int __init amd8111e_init(void)
2002 {
2003         return pci_register_driver(&amd8111e_driver);
2004 }
2005
2006 static void __exit amd8111e_cleanup(void)
2007 {
2008         pci_unregister_driver(&amd8111e_driver);
2009 }
2010
2011 module_init(amd8111e_init);
2012 module_exit(amd8111e_cleanup);