2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
/*
 * Message-level gated logging helpers. The printk bodies live on
 * continuation lines not visible in this excerpt.
 */
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
/* Driver identity strings; NAPI builds carry a "-NAPI" version suffix. */
46 #ifdef CONFIG_SIS190_NAPI
47 #define NAPI_SUFFIX "-NAPI"
49 #define NAPI_SUFFIX ""
52 #define DRV_VERSION "1.2" NAPI_SUFFIX
53 #define DRV_NAME "sis190"
54 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
55 #define PFX DRV_NAME ": "
/*
 * Rx delivery differs per build: NAPI uses netif_receive_skb and honors
 * the poll quota; the legacy path uses netif_rx and ignores the quota.
 */
57 #ifdef CONFIG_SIS190_NAPI
58 #define sis190_rx_skb netif_receive_skb
59 #define sis190_rx_quota(count, quota) min(count, quota)
61 #define sis190_rx_skb netif_rx
62 #define sis190_rx_quota(count, quota) count
/* Ring geometry, buffer sizing and driver timeouts. */
65 #define MAC_ADDR_LEN 6
67 #define NUM_TX_DESC 64
68 #define NUM_RX_DESC 64
69 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
70 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
71 #define RX_BUF_SIZE 1536
73 #define SIS190_REGS_SIZE 0x80
74 #define SIS190_TX_TIMEOUT (6*HZ)
75 #define SIS190_PHY_TIMEOUT (10*HZ)
76 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
77 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
80 /* Enhanced PHY access register bit definitions */
81 #define EhnMIIread 0x0000
82 #define EhnMIIwrite 0x0020
83 #define EhnMIIdataShift 16
84 #define EhnMIIpmdShift 6 /* 7016 only */
85 #define EhnMIIregShift 11
86 #define EhnMIIreq 0x0010
87 #define EhnMIInotDone 0x0010
89 /* Write/read MMIO register */
90 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
91 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
92 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
93 #define SIS_R8(reg) readb (ioaddr + (reg))
94 #define SIS_R16(reg) readw (ioaddr + (reg))
95 #define SIS_R32(reg) readl (ioaddr + (reg))
/* Read back IntrControl — presumably to flush posted PCI writes; confirm. */
97 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
/* MMIO register offsets; entries marked unused/reserved by the author. */
99 enum sis190_registers {
101 TxDescStartAddr = 0x04,
102 rsv0 = 0x08, // reserved
103 TxSts = 0x0c, // unused (Control/Status)
105 RxDescStartAddr = 0x14,
106 rsv1 = 0x18, // reserved
107 RxSts = 0x1c, // unused
111 IntrTimer = 0x2c, // unused (Interupt Timer)
112 PMControl = 0x30, // unused (Power Mgmt Control/Status)
113 rsv2 = 0x34, // reserved
116 StationControl = 0x40,
118 GIoCR = 0x48, // unused (GMAC IO Compensation)
119 GIoCtrl = 0x4c, // unused (GMAC IO Control)
121 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
122 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
123 rsv3 = 0x5c, // reserved
127 // Undocumented = 0x6c,
129 RxWolData = 0x74, // unused (Rx WOL Data Access)
130 RxMPSControl = 0x78, // unused (Rx MPS Control)
131 rsv4 = 0x7c, // reserved
/* Register bit values: interrupt status bits, Rx error bits, Rx filter bits. */
134 enum sis190_register_content {
136 SoftInt = 0x40000000, // unused
137 Timeup = 0x20000000, // unused
138 PauseFrame = 0x00080000, // unused
139 MagicPacket = 0x00040000, // unused
140 WakeupFrame = 0x00020000, // unused
141 LinkChange = 0x00010000,
142 RxQEmpty = 0x00000080,
144 TxQ1Empty = 0x00000020, // unused
145 TxQ1Int = 0x00000010,
146 TxQ0Empty = 0x00000008, // unused
147 TxQ0Int = 0x00000004,
152 RxRES = 0x00200000, // unused
154 RxRUNT = 0x00100000, // unused
155 RxRWT = 0x00400000, // unused
159 CmdRxEnb = 0x08, // unused
161 RxBufEmpty = 0x01, // unused
164 Cfg9346_Lock = 0x00, // unused
165 Cfg9346_Unlock = 0xc0, // unused
168 AcceptErr = 0x20, // unused
169 AcceptRunt = 0x10, // unused
170 AcceptBroadcast = 0x0800,
171 AcceptMulticast = 0x0400,
172 AcceptMyPhys = 0x0200,
173 AcceptAllPhys = 0x0100,
177 RxCfgDMAShift = 8, // 0x1a in RxControl ?
180 TxInterFrameGapShift = 24,
181 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
191 LinkStatus = 0x02, // unused
192 FullDup = 0x01, // unused
195 TBILinkOK = 0x02000000, // unused
/*
 * Descriptor status/size bits.
 * NOTE(review): "_DescStatusBit" (underscore + uppercase) is a reserved
 * identifier in C; a rename would be cleaner — not changed here.
 */
212 enum _DescStatusBit {
222 RxSizeMask = 0x0000ffff
/* EEPROM access register bits (see sis190_read_eeprom). */
225 enum sis190_eeprom_access_register_bits {
226 EECS = 0x00000001, // unused
227 EECLK = 0x00000002, // unused
228 EEDO = 0x00000008, // unused
229 EEDI = 0x00000004, // unused
232 EEWOP = 0x00000100 // unused
/*
 * Per-device driver state, stored in the net_device private area
 * (accessed everywhere via netdev_priv()).
 */
235 struct sis190_private {
236 void __iomem *mmio_addr;
237 struct pci_dev *pci_dev;
238 struct net_device_stats stats;
/* Descriptor rings (DMA-coherent) and the skbs mapped into them. */
247 struct RxDesc *RxDescRing;
248 struct TxDesc *TxDescRing;
249 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
250 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
/* Deferred PHY handling: work item plus the periodic timer that kicks it. */
251 struct work_struct phy_task;
252 struct timer_list timer;
254 struct mii_if_info mii_if;
/*
 * Chip description table, indexed by pci_device_id.driver_data.
 * NOTE(review): conventional qualifier order is "static const".
 */
257 const static struct {
259 u8 version; /* depend on docs */
260 u32 RxConfigMask; /* clear the bits supported by this chip */
261 } sis_chip_info[] = {
262 { DRV_NAME, 0x00, 0xff7e1880, },
265 static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
266 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
270 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
/* Frames shorter than this are copied into a fresh skb (see try_rx_copy). */
272 static int rx_copybreak = 200;
278 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
279 module_param(rx_copybreak, int, 0);
280 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
281 module_param_named(debug, debug.msg_enable, int, 0);
282 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
283 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
284 MODULE_VERSION(DRV_VERSION);
285 MODULE_LICENSE("GPL");
/* Interrupt sources the driver actually enables in IntrMask. */
287 static const u32 sis190_intr_mask =
288 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
291 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
292 * The chips use a 64 element hash table based on the Ethernet CRC.
294 static int multicast_filter_limit = 32;
/*
 * Issue one enhanced-PHY (GMII) command and busy-wait for completion:
 * write the command word, then poll GMIIControl until EhnMIInotDone
 * clears (up to 100 iterations). Logs an error if it never completes.
 */
296 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
300 SIS_W32(GMIIControl, ctl);
304 for (i = 0; i < 100; i++) {
305 if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
/* Only reached when the poll loop above never saw the done condition. */
311 printk(KERN_ERR PFX "PHY command failed !\n");
/*
 * Write one PHY register: reg/val are packed into the GMII command word
 * using the Ehn* shift constants. 'pmd' comes from a line not visible here.
 */
314 static void mdio_write(void __iomem *ioaddr, int reg, int val)
318 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
319 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
320 (((u32) val) << EhnMIIdataShift));
/* Read one PHY register; the 16-bit result sits in GMIIControl's data field. */
323 static int mdio_read(void __iomem *ioaddr, int reg)
327 __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
328 (((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));
330 return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
/* mii_if_info adapter: net_device-based wrappers; phy_id is ignored. */
333 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
335 struct sis190_private *tp = netdev_priv(dev);
337 mdio_write(tp->mmio_addr, reg, val);
340 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
342 struct sis190_private *tp = netdev_priv(dev);
344 return mdio_read(tp->mmio_addr, reg);
/*
 * Read one 16-bit word from the EEPROM at address 'reg'.
 * Bails out early if ROMControl bit 0x0002 is clear (presumably
 * "EEPROM present" — confirm against the datasheet), then starts an
 * EEPROM read and polls EEREQ up to 200 times; the data word lives in
 * the high half of ROMInterface. Probe-time only (__devinit).
 */
347 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
352 if (!(SIS_R32(ROMControl) & 0x0002))
355 SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
357 for (i = 0; i < 200; i++) {
358 if (!(SIS_R32(ROMInterface) & EEREQ)) {
359 data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
/* Mask all interrupt sources, then acknowledge any pending status bits. */
368 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
370 SIS_W32(IntrMask, 0x00);
371 SIS_W32(IntrStatus, 0xffffffff);
/* Quiesce the chip: stop both DMA engines, then mask and ack interrupts. */
375 static void sis190_asic_down(void __iomem *ioaddr)
377 /* Stop the chip's Tx and Rx DMA processes. */
379 SIS_W32(TxControl, 0x1a00);
380 SIS_W32(RxControl, 0x1a00);
382 sis190_irq_mask_and_ack(ioaddr);
/* Flag the final Rx descriptor so the chip wraps back to the ring start. */
385 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
387 desc->size |= cpu_to_le32(RingEnd);
/*
 * Hand an Rx descriptor (back) to the hardware: restore the buffer size
 * while preserving the end-of-ring bit, then set OWN so the NIC may use it.
 */
390 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
392 u32 eor = le32_to_cpu(desc->size) & RingEnd;
395 desc->size = cpu_to_le32(rx_buf_sz | eor);
397 desc->status = cpu_to_le32(OWNbit | INTbit);
/* Install a DMA mapping into a descriptor and give it to the hardware. */
400 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
403 desc->addr = cpu_to_le32(mapping);
404 sis190_give_to_asic(desc, rx_buf_sz);
/*
 * Neutralize a descriptor: poison the address and clear everything in
 * 'size' except the end-of-ring marker, so the NIC cannot DMA into it.
 */
407 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
410 desc->addr = 0xdeadbeef;
411 desc->size &= cpu_to_le32(RingEnd);
/*
 * Allocate one Rx skb, DMA-map it and attach it to 'desc'. On the
 * (not visible) failure path the descriptor is made unusable instead.
 */
416 static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
417 struct RxDesc *desc, u32 rx_buf_sz)
423 skb = dev_alloc_skb(rx_buf_sz);
429 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
432 sis190_map_to_asic(desc, mapping, rx_buf_sz);
438 sis190_make_unusable_by_asic(desc);
/*
 * Refill Rx descriptors in [start, end): slots that still hold an skb
 * are skipped; the rest get fresh buffers. Returns the refill count
 * (return statement not visible in this excerpt).
 */
442 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
447 for (cur = start; cur < end; cur++) {
448 int ret, i = cur % NUM_RX_DESC;
450 if (tp->Rx_skbuff[i])
453 ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
454 tp->RxDescRing + i, tp->rx_buf_sz);
/*
 * Copybreak: for frames below rx_copybreak, copy the payload into a
 * small fresh skb and recycle the original buffer/descriptor, avoiding
 * a remap of the large Rx buffer.
 */
461 static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
462 struct RxDesc *desc, int rx_buf_sz)
466 if (pkt_size < rx_copybreak) {
469 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
471 skb_reserve(skb, NET_IP_ALIGN);
472 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
474 sis190_give_to_asic(desc, rx_buf_sz);
/*
 * Rx completion processing: walk descriptors the hardware has released
 * (OWN bit clear), deliver good frames, recycle bad ones, then refill
 * the ring. Bounded by the ring occupancy and, under NAPI, dev->quota.
 */
481 static int sis190_rx_interrupt(struct net_device *dev,
482 struct sis190_private *tp, void __iomem *ioaddr)
484 struct net_device_stats *stats = &tp->stats;
485 u32 rx_left, cur_rx = tp->cur_rx;
488 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
489 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
491 for (; rx_left > 0; rx_left--, cur_rx++) {
492 unsigned int entry = cur_rx % NUM_RX_DESC;
493 struct RxDesc *desc = tp->RxDescRing + entry;
/* Hardware still owns this descriptor: nothing more to reap. */
496 if (desc->status & OWNbit)
499 status = le32_to_cpu(desc->PSize);
501 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
/* CRC / pad errors: count them and hand the buffer straight back. */
504 if (status & RxCRC) {
505 net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
508 stats->rx_crc_errors++;
509 sis190_give_to_asic(desc, tp->rx_buf_sz);
510 } else if (!(status & PADbit)) {
511 net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
514 stats->rx_length_errors++;
515 sis190_give_to_asic(desc, tp->rx_buf_sz);
/* Good frame: size field minus 4-byte FCS. */
517 struct sk_buff *skb = tp->Rx_skbuff[entry];
518 int pkt_size = (status & RxSizeMask) - 4;
/* Default action re-syncs the buffer for the device (copybreak path);
 * switched to pci_unmap_single below when the skb is passed up whole. */
519 void (*pci_action)(struct pci_dev *, dma_addr_t,
520 size_t, int) = pci_dma_sync_single_for_device;
/* Oversized (fragmented) frame: treat as a length error and recycle. */
522 if (unlikely(pkt_size > tp->rx_buf_sz)) {
523 net_intr(tp, KERN_INFO
524 "%s: (frag) status = %08x.\n",
527 stats->rx_length_errors++;
528 sis190_give_to_asic(desc, tp->rx_buf_sz);
/* Make the DMA'd data visible to the CPU before touching it. */
532 pci_dma_sync_single_for_cpu(tp->pci_dev,
533 le32_to_cpu(desc->addr), tp->rx_buf_sz,
/* If no copybreak copy was made, consume the mapped skb itself. */
536 if (sis190_try_rx_copy(&skb, pkt_size, desc,
538 pci_action = pci_unmap_single;
539 tp->Rx_skbuff[entry] = NULL;
540 sis190_make_unusable_by_asic(desc);
543 pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
544 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
547 skb_put(skb, pkt_size);
548 skb->protocol = eth_type_trans(skb, dev);
552 dev->last_rx = jiffies;
553 stats->rx_bytes += pkt_size;
/* Refill what was consumed; warn if allocation made no progress. */
557 count = cur_rx - tp->cur_rx;
560 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
561 if (!delta && count && netif_msg_intr(tp))
562 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
563 tp->dirty_rx += delta;
565 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
566 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
/*
 * Undo the DMA mapping of a transmitted skb and wipe its descriptor.
 * The unmap length mirrors the padding applied at xmit time (ETH_ZLEN
 * minimum), so it matches what was mapped.
 */
571 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
576 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
578 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
580 memset(desc, 0x00, sizeof(*desc));
/*
 * Tx completion processing: reap descriptors the hardware has finished
 * with (OWN bit clear), free the skbs, and wake the queue if it had
 * stalled on a full ring.
 */
583 static void sis190_tx_interrupt(struct net_device *dev,
584 struct sis190_private *tp, void __iomem *ioaddr)
586 u32 pending, dirty_tx = tp->dirty_tx;
588 * It would not be needed if queueing was allowed to be enabled
589 * again too early (hint: think preempt and unclocked smp systems).
591 unsigned int queue_stopped;
/* Ring is full exactly when every descriptor is pending. */
594 pending = tp->cur_tx - dirty_tx;
595 queue_stopped = (pending == NUM_TX_DESC);
597 for (; pending; pending--, dirty_tx++) {
598 unsigned int entry = dirty_tx % NUM_TX_DESC;
599 struct TxDesc *txd = tp->TxDescRing + entry;
/* Hardware still owns this one: stop reaping. */
602 if (le32_to_cpu(txd->status) & OWNbit)
605 skb = tp->Tx_skbuff[entry];
607 tp->stats.tx_packets++;
608 tp->stats.tx_bytes += skb->len;
610 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
611 tp->Tx_skbuff[entry] = NULL;
612 dev_kfree_skb_irq(skb);
615 if (tp->dirty_tx != dirty_tx) {
616 tp->dirty_tx = dirty_tx;
619 netif_wake_queue(dev);
624 * The interrupt handler does all of the Rx thread work and cleans up after
/*
 * Shared-IRQ handler: read and acknowledge IntrStatus, then dispatch
 * to the Rx/Tx reapers and the PHY worker on link change.
 */
627 static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
629 struct net_device *dev = __dev;
630 struct sis190_private *tp = netdev_priv(dev);
631 void __iomem *ioaddr = tp->mmio_addr;
632 unsigned int handled = 0;
635 status = SIS_R32(IntrStatus);
/* 0xffffffff: device gone/unresponsive; 0: not our interrupt (shared line). */
637 if ((status == 0xffffffff) || !status)
/* Interface went down under us: quiesce the chip and bail. */
642 if (unlikely(!netif_running(dev))) {
643 sis190_asic_down(ioaddr);
/* Ack exactly the bits we observed. */
647 SIS_W32(IntrStatus, status);
649 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
/* Link changes are handled in process context via the phy work item. */
651 if (status & LinkChange) {
652 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
653 schedule_work(&tp->phy_task);
657 sis190_rx_interrupt(dev, tp, ioaddr);
659 if (status & TxQ0Int)
660 sis190_tx_interrupt(dev, tp, ioaddr);
662 return IRQ_RETVAL(handled);
/* Poll path for netconsole etc.: run the ISR with the IRQ line disabled. */
665 #ifdef CONFIG_NET_POLL_CONTROLLER
666 static void sis190_netpoll(struct net_device *dev)
668 struct sis190_private *tp = netdev_priv(dev);
669 struct pci_dev *pdev = tp->pci_dev;
671 disable_irq(pdev->irq);
672 sis190_interrupt(pdev->irq, dev, NULL);
673 enable_irq(pdev->irq);
/* Unmap and free one Rx skb, then neutralize its descriptor. */
677 static void sis190_free_rx_skb(struct sis190_private *tp,
678 struct sk_buff **sk_buff, struct RxDesc *desc)
680 struct pci_dev *pdev = tp->pci_dev;
682 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
684 dev_kfree_skb(*sk_buff);
686 sis190_make_unusable_by_asic(desc);
/* Release every populated Rx slot (teardown / error path). */
689 static void sis190_rx_clear(struct sis190_private *tp)
693 for (i = 0; i < NUM_RX_DESC; i++) {
694 if (!tp->Rx_skbuff[i])
696 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
/* Reset all ring cursors to the empty state. */
700 static void sis190_init_ring_indexes(struct sis190_private *tp)
702 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/*
 * Prepare both rings for use: clear the skb tables, fully populate the
 * Rx ring, and mark its last descriptor as end-of-ring. Fails (path not
 * visible here) if the Rx ring cannot be filled completely.
 */
705 static int sis190_init_ring(struct net_device *dev)
707 struct sis190_private *tp = netdev_priv(dev);
709 sis190_init_ring_indexes(tp);
711 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
712 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
714 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
717 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
/*
 * Program the Rx filter from dev->flags and the multicast list:
 * promiscuous accepts everything; too many multicast entries (or
 * IFF_ALLMULTI) opens the hash filter fully; otherwise each address is
 * hashed (top 6 bits of the Ethernet CRC) into the 64-bit table.
 * Registers are written under tp->lock.
 */
726 static void sis190_set_rx_mode(struct net_device *dev)
728 struct sis190_private *tp = netdev_priv(dev);
729 void __iomem *ioaddr = tp->mmio_addr;
731 u32 mc_filter[2]; /* Multicast hash filter */
734 if (dev->flags & IFF_PROMISC) {
735 /* Unconditionally log net taps. */
736 net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
739 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
741 mc_filter[1] = mc_filter[0] = 0xffffffff;
742 } else if ((dev->mc_count > multicast_filter_limit) ||
743 (dev->flags & IFF_ALLMULTI)) {
744 /* Too many to filter perfectly -- accept all multicasts. */
745 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
746 mc_filter[1] = mc_filter[0] = 0xffffffff;
748 struct dev_mc_list *mclist;
/* Exact filtering: hash each list entry into the 2x32-bit table. */
751 rx_mode = AcceptBroadcast | AcceptMyPhys;
752 mc_filter[1] = mc_filter[0] = 0;
753 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
754 i++, mclist = mclist->next) {
756 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
757 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
758 rx_mode |= AcceptMulticast;
762 spin_lock_irqsave(&tp->lock, flags);
764 SIS_W16(RxMacControl, rx_mode | 0x2);
765 SIS_W32(RxHashTable, mc_filter[0]);
766 SIS_W32(RxHashTable + 4, mc_filter[1]);
768 spin_unlock_irqrestore(&tp->lock, flags);
/* Pulse the soft-reset bit in IntrControl, then fully quiesce the chip. */
771 static void sis190_soft_reset(void __iomem *ioaddr)
773 SIS_W32(IntrControl, 0x8000);
776 SIS_W32(IntrControl, 0x0);
777 sis190_asic_down(ioaddr);
/*
 * Bring the hardware to an operational state: reset, point the chip at
 * the descriptor rings, program MAC/PHY control defaults, install the
 * Rx filter, unmask interrupts and start both DMA engines.
 */
781 static void sis190_hw_start(struct net_device *dev)
783 struct sis190_private *tp = netdev_priv(dev);
784 void __iomem *ioaddr = tp->mmio_addr;
786 sis190_soft_reset(ioaddr);
788 SIS_W32(TxDescStartAddr, tp->tx_dma);
789 SIS_W32(RxDescStartAddr, tp->rx_dma);
/* Clear any stale status with interrupts still masked. */
791 SIS_W32(IntrStatus, 0xffffffff);
792 SIS_W32(IntrMask, 0x0);
794 * Default is 100Mbps.
795 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
797 SIS_W16(StationControl, 0x1901);
798 SIS_W32(GMIIControl, 0x0);
799 SIS_W32(TxMacControl, 0x60);
800 SIS_W16(RxMacControl, 0x02);
801 SIS_W32(RxHashTable, 0x0);
/* Disable wake-on-LAN matching. */
803 SIS_W32(RxWolCtrl, 0x0);
804 SIS_W32(RxWolData, 0x0);
808 sis190_set_rx_mode(dev);
810 /* Enable all known interrupts by setting the interrupt mask. */
811 SIS_W32(IntrMask, sis190_intr_mask);
813 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
814 SIS_W32(RxControl, 0x1a1d);
816 netif_start_queue(dev);
/*
 * Process-context PHY supervision (scheduled from the ISR on link
 * change and from the periodic timer): wait out a pending PHY reset,
 * re-kick reset until autonegotiation completes, and once it has, map
 * the link-partner ability bits to a StationControl value and report
 * the negotiated mode.
 */
819 static void sis190_phy_task(void * data)
821 struct net_device *dev = data;
822 struct sis190_private *tp = netdev_priv(dev);
823 void __iomem *ioaddr = tp->mmio_addr;
828 val = mdio_read(ioaddr, MII_BMCR);
/* Reset still in progress: check back shortly. */
829 if (val & BMCR_RESET) {
830 // FIXME: needlessly high ? -- FR 02/07/2005
831 mod_timer(&tp->timer, jiffies + HZ/10);
832 } else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
833 net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
835 mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
836 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
/* LPA-bits -> human-readable mode -> StationControl value lookup table. */
844 { LPA_1000XFULL | LPA_SLCT,
845 "1000 Mbps Full Duplex",
847 { LPA_1000XHALF | LPA_SLCT,
848 "1000 Mbps Half Duplex",
851 "100 Mbps Full Duplex",
854 "100 Mbps Half Duplex",
857 "10 Mbps Full Duplex",
860 "10 Mbps Half Duplex",
862 { 0, "unknown", 0x0000 }
865 val = mdio_read(ioaddr, 0x1f);
866 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
868 val = mdio_read(ioaddr, MII_LPA);
869 net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);
/* First table entry whose bits are all present in LPA wins. */
871 for (p = reg31; p->ctl; p++) {
872 if ((val & p->val) == p->val)
876 SIS_W16(StationControl, p->ctl);
877 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
879 netif_carrier_on(dev);
/* Periodic timer: defer PHY work to process context while the device runs. */
885 static void sis190_phy_timer(unsigned long __opaque)
887 struct net_device *dev = (struct net_device *)__opaque;
888 struct sis190_private *tp = netdev_priv(dev);
890 if (likely(netif_running(dev)))
891 schedule_work(&tp->phy_task);
/* Stop the PHY timer, waiting for a running handler to finish. */
894 static inline void sis190_delete_timer(struct net_device *dev)
896 struct sis190_private *tp = netdev_priv(dev);
898 del_timer_sync(&tp->timer);
/* Arm the PHY supervision timer for its first expiry. */
901 static inline void sis190_request_timer(struct net_device *dev)
903 struct sis190_private *tp = netdev_priv(dev);
904 struct timer_list *timer = &tp->timer;
907 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
908 timer->data = (unsigned long)dev;
909 timer->function = sis190_phy_timer;
/*
 * Choose the Rx buffer size: the fixed RX_BUF_SIZE normally, or
 * mtu + header + 8 slack bytes for jumbo MTUs.
 */
913 static void sis190_set_rxbufsize(struct sis190_private *tp,
914 struct net_device *dev)
916 unsigned int mtu = dev->mtu;
918 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
/*
 * net_device open: allocate coherent Tx/Rx rings, populate the Rx ring,
 * start the PHY work/timer, grab the (shared) IRQ and start the
 * hardware. Uses goto-style unwind labels (partially visible below).
 */
921 static int sis190_open(struct net_device *dev)
923 struct sis190_private *tp = netdev_priv(dev);
924 struct pci_dev *pdev = tp->pci_dev;
927 sis190_set_rxbufsize(tp, dev);
930 * Rx and Tx descriptors need 256 bytes alignment.
931 * pci_alloc_consistent() guarantees a stronger alignment.
933 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
937 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
941 rc = sis190_init_ring(dev);
945 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
947 sis190_request_timer(dev);
949 rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
951 goto err_release_timer_2;
953 sis190_hw_start(dev);
/* Error unwind: release in reverse order of acquisition. */
958 sis190_delete_timer(dev);
961 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
964 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
/*
 * Drop every queued-but-unsent Tx skb (timeout/teardown path),
 * counting each as a drop, and reset the Tx cursors.
 */
969 static void sis190_tx_clear(struct sis190_private *tp)
973 for (i = 0; i < NUM_TX_DESC; i++) {
974 struct sk_buff *skb = tp->Tx_skbuff[i];
979 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
980 tp->Tx_skbuff[i] = NULL;
983 tp->stats.tx_dropped++;
985 tp->cur_tx = tp->dirty_tx = 0;
/*
 * Shut the interface down: stop the timer and queue, flush deferred PHY
 * work, quiesce the chip under the lock, wait out in-flight interrupts
 * (and the NAPI poll), then drain both rings. The loop re-checks
 * IntrMask in case an interrupt re-enabled something meanwhile.
 */
988 static void sis190_down(struct net_device *dev)
990 struct sis190_private *tp = netdev_priv(dev);
991 void __iomem *ioaddr = tp->mmio_addr;
992 unsigned int poll_locked = 0;
994 sis190_delete_timer(dev);
996 netif_stop_queue(dev);
998 flush_scheduled_work();
1001 spin_lock_irq(&tp->lock);
1003 sis190_asic_down(ioaddr);
1005 spin_unlock_irq(&tp->lock);
1007 synchronize_irq(dev->irq);
1010 netif_poll_disable(dev);
1014 synchronize_sched();
1016 } while (SIS_R32(IntrMask));
1018 sis190_tx_clear(tp);
1019 sis190_rx_clear(tp);
/*
 * net_device stop: tear everything down, release the IRQ, re-enable
 * polling for the next open, and free the coherent descriptor rings.
 */
1022 static int sis190_close(struct net_device *dev)
1024 struct sis190_private *tp = netdev_priv(dev);
1025 struct pci_dev *pdev = tp->pci_dev;
1029 free_irq(dev->irq, dev);
1031 netif_poll_enable(dev);
1033 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1034 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1036 tp->TxDescRing = NULL;
1037 tp->RxDescRing = NULL;
/*
 * hard_start_xmit: pad runt frames to ETH_ZLEN, claim the next Tx
 * descriptor, DMA-map the payload, fill in the descriptor (setting
 * RingEnd on the last slot and handing ownership to the chip last),
 * kick the Tx DMA engine, and stop the queue when the ring fills.
 */
1042 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1044 struct sis190_private *tp = netdev_priv(dev);
1045 void __iomem *ioaddr = tp->mmio_addr;
1046 u32 len, entry, dirty_tx;
1047 struct TxDesc *desc;
/* Hardware minimum frame size: pad (skb_padto may drop on OOM). */
1050 if (unlikely(skb->len < ETH_ZLEN)) {
1051 skb = skb_padto(skb, ETH_ZLEN);
1053 tp->stats.tx_dropped++;
1061 entry = tp->cur_tx % NUM_TX_DESC;
1062 desc = tp->TxDescRing + entry;
/* Should not happen: the queue is stopped before the ring fills. */
1064 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1065 netif_stop_queue(dev);
1066 net_tx_err(tp, KERN_ERR PFX
1067 "%s: BUG! Tx Ring full when queue awake!\n",
1069 return NETDEV_TX_BUSY;
1072 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1074 tp->Tx_skbuff[entry] = skb;
1076 desc->PSize = cpu_to_le32(len);
1077 desc->addr = cpu_to_le32(mapping);
1079 desc->size = cpu_to_le32(len);
1080 if (entry == (NUM_TX_DESC - 1))
1081 desc->size |= cpu_to_le32(RingEnd);
/* Ownership transfer to the NIC is the final descriptor write. */
1085 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1091 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1093 dev->trans_start = jiffies;
/*
 * Stop the queue when full, then re-check dirty_tx: if the Tx ISR
 * reaped descriptors in the meantime, wake the queue again.
 */
1095 dirty_tx = tp->dirty_tx;
1096 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1097 netif_stop_queue(dev);
1099 if (dirty_tx != tp->dirty_tx)
1100 netif_wake_queue(dev);
1103 return NETDEV_TX_OK;
/* Return the driver-maintained interface statistics. */
1106 static struct net_device_stats *sis190_get_stats(struct net_device *dev)
1108 struct sis190_private *tp = netdev_priv(dev);
/*
 * Release everything sis190_init_board acquired: the MMIO mapping, the
 * PCI regions and the device itself (net_device freeing not visible here).
 */
1113 static void sis190_release_board(struct pci_dev *pdev)
1115 struct net_device *dev = pci_get_drvdata(pdev);
1116 struct sis190_private *tp = netdev_priv(dev);
1118 iounmap(tp->mmio_addr);
1119 pci_release_regions(pdev);
1120 pci_disable_device(pdev);
/*
 * Probe-time board bring-up: allocate the net_device, enable the PCI
 * device, validate and claim BAR 0 (MMIO), set the 32-bit DMA mask,
 * map the registers, wire up the MII accessors and reset the chip.
 * Error paths unwind via the numbered labels (partially visible).
 */
1124 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1126 struct sis190_private *tp;
1127 struct net_device *dev;
1128 void __iomem *ioaddr;
1131 dev = alloc_etherdev(sizeof(*tp));
1133 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1138 SET_MODULE_OWNER(dev);
1139 SET_NETDEV_DEV(dev, &pdev->dev);
1141 tp = netdev_priv(dev);
1142 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1144 rc = pci_enable_device(pdev);
1146 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1147 goto err_free_dev_1;
/* BAR 0 must be a memory resource of at least SIS190_REGS_SIZE bytes. */
1152 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1153 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1155 goto err_pci_disable_2;
1157 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1158 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1160 goto err_pci_disable_2;
1163 rc = pci_request_regions(pdev, DRV_NAME);
1165 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1167 goto err_pci_disable_2;
1170 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1172 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1174 goto err_free_res_3;
1177 pci_set_master(pdev);
1179 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1181 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1184 goto err_free_res_3;
1188 tp->mmio_addr = ioaddr;
/* Generic MII layer glue; phy_id is never set (see the XXX below). */
1190 tp->mii_if.dev = dev;
1191 tp->mii_if.mdio_read = __mdio_read;
1192 tp->mii_if.mdio_write = __mdio_write;
1193 // tp->mii_if.phy_id = XXX;
1194 tp->mii_if.phy_id_mask = 0x1f;
1195 tp->mii_if.reg_num_mask = 0x1f;
1197 sis190_irq_mask_and_ack(ioaddr);
1199 sis190_soft_reset(ioaddr);
/* Error unwind. */
1204 pci_release_regions(pdev);
1206 pci_disable_device(pdev);
/*
 * Watchdog handler: stop the Tx engine, mask interrupts, drop all
 * pending Tx packets under the lock (so a shared-IRQ handler cannot
 * reap concurrently), then fully restart the hardware.
 */
1214 static void sis190_tx_timeout(struct net_device *dev)
1216 struct sis190_private *tp = netdev_priv(dev);
1217 void __iomem *ioaddr = tp->mmio_addr;
1220 /* Disable Tx, if not already */
1221 tmp8 = SIS_R8(TxControl);
1222 if (tmp8 & CmdTxEnb)
1223 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1226 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1227 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1229 /* Disable interrupts by clearing the interrupt mask. */
1230 SIS_W32(IntrMask, 0x0000);
1232 /* Stop a shared interrupt from scavenging while we are. */
1233 spin_lock_irq(&tp->lock);
1234 sis190_tx_clear(tp);
1235 spin_unlock_irq(&tp->lock);
1237 /* ...and finally, reset everything. */
1238 sis190_hw_start(dev);
1240 netif_wake_queue(dev);
/*
 * Advertise 10/100 half+full and 1000 full, then reset the PHY with
 * autonegotiation enabled and a restart requested.
 */
1243 static void sis190_set_speed_auto(struct net_device *dev)
1245 struct sis190_private *tp = netdev_priv(dev);
1246 void __iomem *ioaddr = tp->mmio_addr;
1249 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1251 val = mdio_read(ioaddr, MII_ADVERTISE);
1253 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1255 mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1256 ADVERTISE_100FULL | ADVERTISE_10FULL |
1257 ADVERTISE_100HALF | ADVERTISE_10HALF);
1259 // Enable 1000 Full Mode.
1260 mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);
1262 // Enable auto-negotiation and restart auto-negotiation.
1263 mdio_write(ioaddr, MII_BMCR,
1264 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
/* ethtool get/set link settings: delegate to the generic MII layer. */
1267 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1269 struct sis190_private *tp = netdev_priv(dev);
1271 return mii_ethtool_gset(&tp->mii_if, cmd);
1274 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1276 struct sis190_private *tp = netdev_priv(dev);
1278 return mii_ethtool_sset(&tp->mii_if, cmd);
/* ethtool driver identification strings. */
1281 static void sis190_get_drvinfo(struct net_device *dev,
1282 struct ethtool_drvinfo *info)
1284 struct sis190_private *tp = netdev_priv(dev);
1286 strcpy(info->driver, DRV_NAME);
1287 strcpy(info->version, DRV_VERSION);
1288 strcpy(info->bus_info, pci_name(tp->pci_dev));
1291 static int sis190_get_regs_len(struct net_device *dev)
1293 return SIS190_REGS_SIZE;
/* Snapshot the MMIO register window under the lock, clamped to its size. */
1296 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1299 struct sis190_private *tp = netdev_priv(dev);
1300 unsigned long flags;
1302 if (regs->len > SIS190_REGS_SIZE)
1303 regs->len = SIS190_REGS_SIZE;
1305 spin_lock_irqsave(&tp->lock, flags);
1306 memcpy_fromio(p, tp->mmio_addr, regs->len);
1307 spin_unlock_irqrestore(&tp->lock, flags);
1310 static int sis190_nway_reset(struct net_device *dev)
1312 struct sis190_private *tp = netdev_priv(dev);
1314 return mii_nway_restart(&tp->mii_if);
/* Message-level accessors backing ethtool -s msglvl. */
1317 static u32 sis190_get_msglevel(struct net_device *dev)
1319 struct sis190_private *tp = netdev_priv(dev);
1321 return tp->msg_enable;
1324 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1326 struct sis190_private *tp = netdev_priv(dev);
1328 tp->msg_enable = value;
1331 static struct ethtool_ops sis190_ethtool_ops = {
1332 .get_settings = sis190_get_settings,
1333 .set_settings = sis190_set_settings,
1334 .get_drvinfo = sis190_get_drvinfo,
1335 .get_regs_len = sis190_get_regs_len,
1336 .get_regs = sis190_get_regs,
1337 .get_link = ethtool_op_get_link,
1338 .get_msglevel = sis190_get_msglevel,
1339 .set_msglevel = sis190_set_msglevel,
1340 .nway_reset = sis190_nway_reset,
/* MII ioctls (SIOCGMIIPHY etc.); only valid while the interface is up. */
1343 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1345 struct sis190_private *tp = netdev_priv(dev);
1347 return !netif_running(dev) ? -EINVAL :
1348 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
/*
 * PCI probe: bring up the board, read the MAC address (loading it from
 * EEPROM into the MAC registers first when ROMControl bit 0x4 says an
 * EEPROM is present — confirm meaning against the datasheet), fill in
 * the net_device ops, register it and start autonegotiation.
 */
1351 static int __devinit sis190_init_one(struct pci_dev *pdev,
1352 const struct pci_device_id *ent)
1354 static int printed_version = 0;
1355 struct sis190_private *tp;
1356 struct net_device *dev;
1357 void __iomem *ioaddr;
/* Banner printed once, on the first probed device. */
1360 if (!printed_version) {
1361 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1362 printed_version = 1;
1365 dev = sis190_init_board(pdev);
1371 tp = netdev_priv(dev);
1372 ioaddr = tp->mmio_addr;
1374 /* Get MAC address */
1375 /* Read node address from the EEPROM */
1377 if (SIS_R32(ROMControl) & 0x4) {
1378 for (i = 0; i < 3; i++) {
1379 SIS_W16(RxMacAddr + 2*i,
1380 sis190_read_eeprom(ioaddr, 3 + i));
/* The station address is then read back byte-wise from the MAC. */
1384 for (i = 0; i < MAC_ADDR_LEN; i++)
1385 dev->dev_addr[i] = SIS_R8(RxMacAddr + i);
1387 INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
/* net_device method table (pre-net_device_ops kernel style). */
1389 dev->open = sis190_open;
1390 dev->stop = sis190_close;
1391 dev->do_ioctl = sis190_ioctl;
1392 dev->get_stats = sis190_get_stats;
1393 dev->tx_timeout = sis190_tx_timeout;
1394 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1395 dev->hard_start_xmit = sis190_start_xmit;
1396 #ifdef CONFIG_NET_POLL_CONTROLLER
1397 dev->poll_controller = sis190_netpoll;
1399 dev->set_multicast_list = sis190_set_rx_mode;
1400 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1401 dev->irq = pdev->irq;
/* base_addr is unused for MMIO devices; poison value on purpose. */
1402 dev->base_addr = (unsigned long) 0xdead;
1404 spin_lock_init(&tp->lock);
1405 rc = register_netdev(dev);
1407 sis190_release_board(pdev);
1411 pci_set_drvdata(pdev, dev);
1413 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1414 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
1415 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1417 dev->dev_addr[0], dev->dev_addr[1],
1418 dev->dev_addr[2], dev->dev_addr[3],
1419 dev->dev_addr[4], dev->dev_addr[5]);
/* No link yet: start carrier-off and let autoneg bring it up. */
1421 netif_carrier_off(dev);
1423 sis190_set_speed_auto(dev);
/* PCI remove: unregister and release everything acquired at probe time. */
1428 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1430 struct net_device *dev = pci_get_drvdata(pdev);
1432 unregister_netdev(dev);
1433 sis190_release_board(pdev);
1434 pci_set_drvdata(pdev, NULL);
/* PCI driver glue and module entry/exit points. */
1437 static struct pci_driver sis190_pci_driver = {
1439 .id_table = sis190_pci_tbl,
1440 .probe = sis190_init_one,
1441 .remove = __devexit_p(sis190_remove_one),
1444 static int __init sis190_init_module(void)
1446 return pci_module_init(&sis190_pci_driver);
1449 static void __exit sis190_cleanup_module(void)
1451 pci_unregister_driver(&sis190_pci_driver);
1454 module_init(sis190_init_module);
1455 module_exit(sis190_cleanup_module);