/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul [natsemi.c]
	Copyright 1999-2001 by Donald Becker. [natsemi.c]
	Written 1997-2001 by Donald Becker. [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Contributors:

		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO:
	* Test Tx checksumming thoroughly
	* Implement dev->tx_timeout

	Low priority TODO:
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers (patch available)

	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default, use ethtool to turn it on.

 */

#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.2"
#define DRV_RELDATE		"Mar 22, 2004"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = 0; } while (0)
#endif
/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define PFX			DRV_NAME ": "
#ifndef TRUE
#define FALSE 0
#define TRUE (!FALSE)
#endif

#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1	/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)
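
/* Illustrative note (not in the original sources): both ring sizes are
 * powers of two, so NEXT_TX/NEXT_RX wrap with a cheap AND instead of a
 * modulo, and TX_BUFFS_AVAIL keeps one slot in reserve so that
 * head == tail always means "empty", never "full".  With
 * CP_TX_RING_SIZE == 64:
 *
 *	NEXT_TX(63) == (63 + 1) & 63 == 0	(wraps to ring start)
 *	head == tail			-> 63 slots available (empty)
 *	head == 63, tail == 0		-> 0 slots available (full)
 */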
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
#define RX_OFFSET		2	/* Rx skb alignment pad; value assumed,
					   original definition was elided */
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096
enum {
	/* NIC register offsets */
	MAC0		= 0x00,	/* Ethernet hardware address. */
	MAR0		= 0x08,	/* Multicast filter. */
	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
	Cmd		= 0x37, /* Command register */
	IntrMask	= 0x3C, /* Interrupt mask */
	IntrStatus	= 0x3E, /* Interrupt status */
	TxConfig	= 0x40, /* Tx configuration */
	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
	RxConfig	= 0x44, /* Rx configuration */
	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
	Config1		= 0x52, /* Config1 */
	Config3		= 0x59, /* Config3 */
	Config4		= 0x5A, /* Config4 */
	MultiIntr	= 0x5C, /* Multiple interrupt select */
	BasicModeCtrl	= 0x62,	/* MII BMCR */
	BasicModeStatus	= 0x64, /* MII BMSR */
	NWayAdvert	= 0x66, /* MII ADVERTISE */
	NWayLPAR	= 0x68, /* MII LPA */
	NWayExpansion	= 0x6A, /* MII Expansion */
	Config5		= 0xD8,	/* Config5 */
	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
	TxThresh	= 0xEC, /* Early Tx threshold */
	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */

	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,	     /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value: 11 bits */
	TxError		= (1 << 23), /* Tx error summary */
	RxError		= (1 << 20), /* Rx error summary */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */
	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
	IPFail		= (1 << 15), /* IP checksum failed */
	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
	RxProtoTCP	= 1,
	RxProtoUDP	= 2,
	RxProtoIP	= 3,
	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18), /* Rx CRC error */
	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

	/* StatsAddr register */
	DumpStats	= (1 << 3),  /* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */

	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15), /* System error on the PCI bus */
	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13), /* Cable length change */
	SWInt		= (1 << 8),  /* Software-requested interrupt */
	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
	TxErr		= (1 << 3),  /* Tx error */
	TxOK		= (1 << 2),  /* Tx packet sent */
	RxErr		= (1 << 1),  /* Rx error */
	RxOK		= (1 << 0),  /* Rx packet received */
	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
					but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,

	/* C mode command register */
	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
	RxOn		= (1 << 3),  /* Rx mode enable */
	TxOn		= (1 << 2),  /* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),  /* Rx mode enable */
	CpTxOn		= (1 << 0),  /* Tx mode enable */

	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
	TxThreshMax	= 2048,	     /* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
	LWACT		= (1 << 4),  /* LWAKE active mode */
	PMEnable	= (1 << 0),  /* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket	= (1 << 5),  /* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),  /* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN		= (1 << 1),  /* LWAKE Pattern */
	LWPME		= (1 << 4),  /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6),  /* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),  /* Accept Multicast wakeup frame */
	UWF		= (1 << 4),  /* Accept Unicast wakeup frame */
	LANWake		= (1 << 1),  /* Enable LANWake signal */
	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */

	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};
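
/* A hedged sketch (not driver code): how an IntrStatus word decodes
 * against the grouped masks above.  cp_interrupt() performs the same
 * tests before dispatching to the rx poll and cp_tx() paths.
 */
#if 0
static void cp_decode_intr_example(u16 status)
{
	if (status & cp_rx_intr_mask)			/* RxOK/RxErr/RxEmpty/RxFIFOOvr */
		printk(KERN_DEBUG PFX "rx-type interrupt\n");
	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		printk(KERN_DEBUG PFX "tx-type interrupt\n");
	if (status & LinkChg)
		printk(KERN_DEBUG PFX "link change, re-check media\n");
	if (status & PciErr)
		printk(KERN_DEBUG PFX "PCI bus error\n");
}
#endif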
static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
	u32		opts1;
	u32		opts2;
	u64		addr;
};

struct ring_info {
	struct sk_buff		*skb;
	u32			len;
};

struct cp_dma_stats {
	u64			tx_ok;
	u64			rx_ok;
	u64			tx_err;
	u32			rx_err;
	u16			rx_fifo;
	u16			frame_align;
	u32			tx_ok_1col;
	u32			tx_ok_mcol;
	u64			rx_ok_phys;
	u64			rx_ok_bcast;
	u32			rx_ok_mcast;
	u16			tx_abort;
	u16			tx_underrun;
} __attribute__((packed));

struct cp_extra_stats {
	unsigned long		rx_frags;
};

struct cp_private {
	void			__iomem *regs;
	struct net_device	*dev;
	spinlock_t		lock;
	u32			msg_enable;

	struct pci_dev		*pdev;
	u32			rx_config;
	u16			cpcmd;

	struct net_device_stats net_stats;
	struct cp_extra_stats	cp_stats;

	unsigned		rx_head		____cacheline_aligned;
	unsigned		rx_tail;
	struct cp_desc		*rx_ring;
	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
	unsigned		rx_buf_sz;

	unsigned		tx_head		____cacheline_aligned;
	unsigned		tx_tail;

	struct cp_desc		*tx_ring;
	struct ring_info	tx_skb[CP_TX_RING_SIZE];
	dma_addr_t		ring_dma;

#if CP_VLAN_TAG_USED
	struct vlan_group	*vlgrp;
#endif

	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

	struct mii_if_info	mii_if;
};
#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
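
/* Note added for clarity: the _f ("flush") variants read the register
 * back after writing it.  MMIO writes may be posted by the PCI bridge;
 * the read-back forces the write to actually reach the chip before the
 * driver proceeds, which matters around reset, ring setup, and mask
 * updates.
 */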
static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);

static struct pci_device_id cp_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,  PCI_DEVICE_ID_TTTECH_MC322), },
	{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};
#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->vlgrp = grp;
	cp->cpcmd |= RxVlanOn;
	cpw16(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->cpcmd &= ~RxVlanOn;
	cpw16(CpCmd, cp->cpcmd);
	if (cp->vlgrp)
		cp->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}
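
/* Worked example (added note): with the default MTU of 1500 the
 * standard PKT_BUF_SZ (1536) is used; for a jumbo MTU of 4096 the
 * buffer becomes 4096 + 14 (Ethernet header) + 8 (FCS + VLAN tag),
 * i.e. 4118 bytes.
 */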
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 be16_to_cpu(desc->opts2 & 0xffff));
	} else
#endif
		netif_receive_skb(skb);
}
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->net_stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->net_stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}
static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
		return 1;
	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
		return 1;
	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
		return 1;
	return 0;
}
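
/* Added note: the protocol field tested here is the PID1/PID0 pair from
 * the descriptor status word (bits 17:16).  cp_rx_csum_ok() only reports
 * success when the hardware both recognized the protocol
 * (RxProtoTCP/UDP/IP) and left the matching TCPFail/UDPFail/IPFail bit
 * clear; anything else falls through to CHECKSUM_NONE in the rx path.
 */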
static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;
	unsigned rx;

rx_status_loop:
	rx = 0;
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail];
		BUG_ON(!skb);

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);
		new_skb->dev = cp->dev;

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	dev->quota -= rx;
	*budget -= rx;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		local_irq_disable();
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);
		local_irq_enable();

		return 0;	/* done */
	}

	return 1;		/* not done */
}
static irqreturn_t
cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		       dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (netif_rx_schedule_prep(dev)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	cp_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail].skb;
		BUG_ON(!skb);

		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, flags;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif
	int mss = 0;

	spin_lock_irq(&cp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (dev->features & NETIF_F_TSO)
		mss = skb_shinfo(skb)->gso_size;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

		flags = eor | len | DescOwn | FirstFrag | LastFrag;

		if (mss)
			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
		else if (skb->ip_summed == CHECKSUM_HW) {
			const struct iphdr *ip = skb->nh.iph;
			if (ip->protocol == IPPROTO_TCP)
				flags |= IPCS | TCPCS;
			else if (ip->protocol == IPPROTO_UDP)
				flags |= IPCS | UDPCS;
			else
				WARN_ON(1);	/* we need a WARN() */
		}

		txd->opts1 = cpu_to_le32(flags);
		wmb();

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].len = len;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = skb->nh.iph;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].len = first_len;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = eor | len | DescOwn;

			if (mss)
				ctrl |= LargeSend |
					((mss & MSSMask) << MSSShift);
			else if (skb->ip_summed == CHECKSUM_HW) {
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= IPCS | TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= IPCS | UDPCS;
				else
					BUG();
			}

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].len = len;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}
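
/* Added note on ordering: in the fragmented path above, every fragment
 * descriptor is published before the head descriptor's DescOwn bit is
 * set, and each opts1 store is preceded by a wmb().  Handing the first
 * chunk to the device last prevents the chip from chasing a descriptor
 * chain whose later fragments are not yet valid in memory.
 */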
/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}
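
/* Hedged illustration (not driver code): how a single multicast address
 * lands in the 64-bit MAR hash filter.  The top six bits of the
 * Ethernet CRC select one of 64 bins; bit 5 picks the 32-bit MAR word
 * and bits 4:0 the bit within it.
 */
#if 0
static void cp_mc_hash_example(void)
{
	static const u8 mc_addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u32 mc_filter[2] = { 0, 0 };
	int bit_nr = ether_crc(ETH_ALEN, mc_addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);	/* same math as above */
}
#endif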
static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);

	return &cp->net_stats;
}
static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		schedule_timeout_uninterruptible(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}

static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}
static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	dma_addr_t ring_dma;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
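	/* Added note: the high half is written as (ring_dma >> 16) >> 16
	 * rather than >> 32 so the expression stays legal when dma_addr_t
	 * is only 32 bits wide; shifting a 32-bit type by its full width
	 * in one step would be undefined behaviour.
	 */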
	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}
static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb->dev = cp->dev;
		skb_reserve(skb, RX_OFFSET);

		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[i] = skb;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp_init_rings_index(cp);

	return cp_refill_rx (cp);
}
static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	return cp_init_rings(cp);
}

static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i]) {
			desc = cp->rx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i]);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i].skb) {
			struct sk_buff *skb = cp->tx_skb[i].skb;

			desc = cp->tx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb(skb);
			cp->net_stats.tx_dropped++;
		}
	}

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}
static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;

	if (netif_msg_ifup(cp))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (netif_msg_ifdown(cp))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	cp_free_rings(cp);
	return 0;
}
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc = 0;
	unsigned long flags;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}
static const char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct cp_private *cp = netdev_priv(dev);

	return location < 8 && mii_2_8139_map[location] ?
	       readw(cp->regs + mii_2_8139_map[location]) : 0;
}

static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = netdev_priv(dev);

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}
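
/* Added note: only the MII register numbers present in mii_2_8139_map
 * are backed by the integrated PHY's register window (BMCR, BMSR,
 * ADVERTISE, LPA, Expansion).  Reads of unmapped locations return 0,
 * and writes to BMCR (location 0) must be bracketed by the Cfg9346
 * unlock/lock sequence, as mdio_write() does above.
 */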
/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)	options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;

	return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
			    struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts   = 0; /* Start from scratch */
	wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
			 WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options = cpr8 (Config3);
	if (options & LinkUp)		wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)	wol->wolopts |= WAKE_MAGIC;

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5);
	if (options & UWF)		wol->wolopts |= WAKE_UCAST;
	if (options & BWF)		wol->wolopts |= WAKE_BCAST;
	if (options & MWF)		wol->wolopts |= WAKE_MCAST;
}
static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct cp_private *cp = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(cp->pdev));
}

static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}

static int cp_get_stats_count (struct net_device *dev)
{
	return CP_NUM_STATS;
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_gset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_sset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
	struct cp_private *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static u32 cp_get_rx_csum(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
}

static int cp_set_rx_csum(struct net_device *dev, u32 data)
{
	struct cp_private *cp = netdev_priv(dev);
	u16 cmd = cp->cpcmd, newcmd;

	newcmd = cmd;

	if (data)
		newcmd |= RxChkSum;
	else
		newcmd &= ~RxChkSum;

	if (newcmd != cmd) {
		unsigned long flags;

		spin_lock_irqsave(&cp->lock, flags);
		cp->cpcmd = newcmd;
		cpw16_f(CpCmd, newcmd);
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return 0;
}
static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len < CP_REGS_SIZE)
		return /* -EINVAL */;

	regs->version = CP_REGS_VER;

	spin_lock_irqsave(&cp->lock, flags);
	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave (&cp->lock, flags);
	netdev_get_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave (&cp->lock, flags);
	rc = netdev_set_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);

	return rc;
}

static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys,
		       sizeof(ethtool_stats_keys));
		break;
	default:
		BUG();
		break;
	}
}
static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = netdev_priv(dev);
	struct cp_dma_stats *nic_stats;
	dma_addr_t dma;
	int i;

	nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
	if (!nic_stats)
		return;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, (u64)dma >> 32);
	cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
	cpr32(StatsAddr);

	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		udelay(10);
	}
	cpw32(StatsAddr, 0);
	cpw32(StatsAddr + 4, 0);
	cpr32(StatsAddr);

	i = 0;
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	BUG_ON(i != CP_NUM_STATS);

	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
}
static struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_stats_count	= cp_get_stats_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_rx_csum		= cp_get_rx_csum,
	.set_rx_csum		= cp_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&cp->lock, flags);
	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);
	return rc;
}
/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0	0x00
#define EE_WRITE_1	0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB		(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
 */

#define eeprom_delay()	readl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD	(4)
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD	(6)
#define EE_ERASE_CMD	(7)

#define EE_EWDS_ADDR	(0)
#define EE_WRAL_ADDR	(1)
#define EE_ERAL_ADDR	(2)
#define EE_EWEN_ADDR	(3)

#define CP_EEPROM_MAGIC		PCI_DEVICE_ID_REALTEK_8139
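
/* Hedged sketch (not driver code): the bit layout of a 93C46-style read
 * command as composed in read_eeprom() below.  With addr_len == 6 the
 * word shifted out MSB-first over 3 + addr_len == 9 clocks is
 *
 *	[ start bit + opcode (EE_READ_CMD == 110b) | 6-bit word address ]
 *
 * so reading word 7 sends (6 << 6) | 7 == 0x187.
 */
#if 0
static int ee_read_cmd_example(void)
{
	int addr_len = 6;	/* 93C46: 6 address bits */
	int location = 7;	/* word address to read   */

	return location | (EE_READ_CMD << addr_len);	/* == 0x187 */
}
#endif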
static void eeprom_cmd_start(void __iomem *ee_addr)
{
	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
	int i;

	/* Shift the command bits out. */
	for (i = cmd_len - 1; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd_end(void __iomem *ee_addr)
{
	writeb (~EE_CS, ee_addr);
	eeprom_delay ();
}

static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}

static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	u16 retval = 0;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	eeprom_cmd_end(ee_addr);

	return retval;
}

static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	int i;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int write_cmd = location | (EE_WRITE_CMD << addr_len);

	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	eeprom_cmd_start(ee_addr);
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);

	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}
static int cp_get_eeprom_len(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int size;

	spin_lock_irq(&cp->lock);
	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
	spin_unlock_irq(&cp->lock);

	return size;
}

static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	eeprom->magic = CP_EEPROM_MAGIC;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	while (i < len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)val;
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i] = (u8)val;
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}

static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	if (eeprom->magic != CP_EEPROM_MAGIC)
		return -EINVAL;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	while (i < len - 1) {
		val = (u16)data[i++];
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
		val |= (u16)data[i];
		write_eeprom(cp->regs, offset, val, addr_len);
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}
/* Put the board into D3cold state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, PCI_D3hot);
}
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	resource_size_t pciaddr;
	unsigned int addr_len, i, pci_using_dac;
	u8 pci_rev;

#ifndef MODULE
	static int version_printed;
	if (version_printed++ == 0)
		printk("%s", version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
		dev_err(&pdev->dev,
			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
			pdev->vendor, pdev->device, pci_rev);
		dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		dev_err(&pdev->dev, "no MMIO resource\n");
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
			(unsigned long long)pci_resource_len(pdev, 1));
		goto err_out_res;
	}

	/* Configure DMA attributes. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;

		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting.\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable consistent DMA configuration, "
				"aborting.\n");
			goto err_out_res;
		}
	}

	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
			(unsigned long long)pci_resource_len(pdev, 1),
			(unsigned long long)pciaddr);
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	cp_stop_hw(cp);

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *) (dev->dev_addr))[i] =
		    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
	dev->poll = cp_rx_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cp_poll_controller;
#endif
	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */

	dev->change_mtu = cp_change_mtu;

	dev->ethtool_ops = &cp_ethtool_ops;
#if 0
	dev->tx_timeout = cp_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#endif

#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

#if 0 /* disabled by default until verified */
	dev->features |= NETIF_F_TSO;
#endif

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(cp->regs);
	if (cp->wol_enabled)
		pci_set_power_state (pdev, PCI_D0);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach (dev);
	netif_stop_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	/* Disable Rx and Tx */
	cpw16 (IntrMask, 0);
	cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));

	spin_unlock_irqrestore (&cp->lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_attach (dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
	cp_init_rings_index (cp);
	cp_init_hw (cp);
	netif_start_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock_irqrestore (&cp->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver cp_driver = {
	.name		= DRV_NAME,
	.id_table	= cp_pci_tbl,
	.probe		= cp_init_one,
	.remove		= cp_remove_one,
#ifdef CONFIG_PM
	.resume		= cp_resume,
	.suspend	= cp_suspend,
#endif
};

static int __init cp_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_module_init (&cp_driver);
}

static void __exit cp_exit (void)
{
	pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);