2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
18 See the file COPYING in this distribution for more information.
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
35 #define net_drv(p, arg...) if (netif_msg_drv(p)) \
37 #define net_probe(p, arg...) if (netif_msg_probe(p)) \
39 #define net_link(p, arg...) if (netif_msg_link(p)) \
41 #define net_intr(p, arg...) if (netif_msg_intr(p)) \
43 #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
46 #define PHY_MAX_ADDR 32
47 #define PHY_ID_ANY 0x1f
48 #define MII_REG_ANY 0x1f
50 #define DRV_VERSION "1.2"
51 #define DRV_NAME "sis190"
52 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
53 #define PFX DRV_NAME ": "
55 #define sis190_rx_skb netif_rx
56 #define sis190_rx_quota(count, quota) count
58 #define MAC_ADDR_LEN 6
60 #define NUM_TX_DESC 64 /* [8..1024] */
61 #define NUM_RX_DESC 64 /* [8..8192] */
62 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
63 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
64 #define RX_BUF_SIZE 1536
65 #define RX_BUF_MASK 0xfff8
67 #define SIS190_REGS_SIZE 0x80
68 #define SIS190_TX_TIMEOUT (6*HZ)
69 #define SIS190_PHY_TIMEOUT (10*HZ)
70 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
71 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
74 /* Enhanced PHY access register bit definitions */
75 #define EhnMIIread 0x0000
76 #define EhnMIIwrite 0x0020
77 #define EhnMIIdataShift 16
78 #define EhnMIIpmdShift 6 /* 7016 only */
79 #define EhnMIIregShift 11
80 #define EhnMIIreq 0x0010
81 #define EhnMIInotDone 0x0010
83 /* Write/read MMIO register */
84 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
85 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
86 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
87 #define SIS_R8(reg) readb (ioaddr + (reg))
88 #define SIS_R16(reg) readw (ioaddr + (reg))
89 #define SIS_R32(reg) readl (ioaddr + (reg))
91 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
93 enum sis190_registers {
95 TxDescStartAddr = 0x04,
96 rsv0 = 0x08, // reserved
97 TxSts = 0x0c, // unused (Control/Status)
99 RxDescStartAddr = 0x14,
100 rsv1 = 0x18, // reserved
101 RxSts = 0x1c, // unused
105 IntrTimer = 0x2c, // unused (Interupt Timer)
106 PMControl = 0x30, // unused (Power Mgmt Control/Status)
107 rsv2 = 0x34, // reserved
110 StationControl = 0x40,
112 GIoCR = 0x48, // unused (GMAC IO Compensation)
113 GIoCtrl = 0x4c, // unused (GMAC IO Control)
115 TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
116 RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
117 rsv3 = 0x5c, // reserved
121 // Undocumented = 0x6c,
123 RxWolData = 0x74, // unused (Rx WOL Data Access)
124 RxMPSControl = 0x78, // unused (Rx MPS Control)
125 rsv4 = 0x7c, // reserved
128 enum sis190_register_content {
130 SoftInt = 0x40000000, // unused
131 Timeup = 0x20000000, // unused
132 PauseFrame = 0x00080000, // unused
133 MagicPacket = 0x00040000, // unused
134 WakeupFrame = 0x00020000, // unused
135 LinkChange = 0x00010000,
136 RxQEmpty = 0x00000080,
138 TxQ1Empty = 0x00000020, // unused
139 TxQ1Int = 0x00000010,
140 TxQ0Empty = 0x00000008, // unused
141 TxQ0Int = 0x00000004,
147 CmdRxEnb = 0x08, // unused
149 RxBufEmpty = 0x01, // unused
152 Cfg9346_Lock = 0x00, // unused
153 Cfg9346_Unlock = 0xc0, // unused
156 AcceptErr = 0x20, // unused
157 AcceptRunt = 0x10, // unused
158 AcceptBroadcast = 0x0800,
159 AcceptMulticast = 0x0400,
160 AcceptMyPhys = 0x0200,
161 AcceptAllPhys = 0x0100,
165 RxCfgDMAShift = 8, // 0x1a in RxControl ?
168 TxInterFrameGapShift = 24,
169 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
171 LinkStatus = 0x02, // unused
172 FullDup = 0x01, // unused
175 TBILinkOK = 0x02000000, // unused
192 enum _DescStatusBit {
194 OWNbit = 0x80000000, // RXOWN/TXOWN
195 INTbit = 0x40000000, // RXINT/TXINT
196 CRCbit = 0x00020000, // CRCOFF/CRCEN
197 PADbit = 0x00010000, // PREADD/PADEN
199 RingEnd = 0x80000000,
201 LSEN = 0x08000000, // TSO ? -- FR
228 RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
237 RxSizeMask = 0x0000ffff
239 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
240 * provide two (unused with Linux) Tx queues. No publically
241 * available documentation alas.
245 enum sis190_eeprom_access_register_bits {
246 EECS = 0x00000001, // unused
247 EECLK = 0x00000002, // unused
248 EEDO = 0x00000008, // unused
249 EEDI = 0x00000004, // unused
252 EEWOP = 0x00000100 // unused
255 /* EEPROM Addresses */
256 enum sis190_eeprom_address {
257 EEPROMSignature = 0x00,
258 EEPROMCLK = 0x01, // unused
263 enum sis190_feature {
269 struct sis190_private {
270 void __iomem *mmio_addr;
271 struct pci_dev *pci_dev;
272 struct net_device *dev;
281 struct RxDesc *RxDescRing;
282 struct TxDesc *TxDescRing;
283 struct sk_buff *Rx_skbuff[NUM_RX_DESC];
284 struct sk_buff *Tx_skbuff[NUM_TX_DESC];
285 struct work_struct phy_task;
286 struct timer_list timer;
288 struct mii_if_info mii_if;
289 struct list_head first_phy;
294 struct list_head list;
301 enum sis190_phy_type {
308 static struct mii_chip_info {
313 } mii_chip_table[] = {
314 { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
315 { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
316 { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
317 { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
318 { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
322 static const struct {
324 } sis_chip_info[] = {
325 { "SiS 190 PCI Fast Ethernet adapter" },
326 { "SiS 191 PCI Gigabit Ethernet adapter" },
329 static struct pci_device_id sis190_pci_tbl[] = {
330 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
331 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
335 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
337 static int rx_copybreak = 200;
343 MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
344 module_param(rx_copybreak, int, 0);
345 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
346 module_param_named(debug, debug.msg_enable, int, 0);
347 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
348 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
349 MODULE_VERSION(DRV_VERSION);
350 MODULE_LICENSE("GPL");
352 static const u32 sis190_intr_mask =
353 RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
356 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
357 * The chips use a 64 element hash table based on the Ethernet CRC.
359 static const int multicast_filter_limit = 32;
/*
 * MII (PHY management) access helpers.
 *
 * The SiS190 exposes PHY register access through the single GMIIControl
 * MMIO register ("enhanced PHY access"): a command word is written and the
 * hardware clears EhnMIInotDone when it completes.
 *
 * NOTE(review): this extract is missing interior lines (braces, local
 * declarations, delay calls); the leading numbers are the original file's
 * line numbers.
 */
361 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
365 	SIS_W32(GMIIControl, ctl);
/* Bounded poll for completion so a dead PHY cannot hang the kernel. */
369 	for (i = 0; i < 100; i++) {
370 		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
/* Only reached when the poll loop above times out. */
376 	printk(KERN_ERR PFX "PHY command failed !\n");
/* Write @val to PHY register @reg of the PHY at address @phy_id. */
379 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
381 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
382 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
383 		(((u32) val) << EhnMIIdataShift));
/* Read PHY register @reg; the result is latched in GMIIControl[31:16]. */
386 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
388 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
389 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
391 	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
/* net_device-flavoured wrappers used by the generic mii_if_info layer. */
394 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
396 	struct sis190_private *tp = netdev_priv(dev);
398 	mdio_write(tp->mmio_addr, phy_id, reg, val);
401 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
403 	struct sis190_private *tp = netdev_priv(dev);
405 	return mdio_read(tp->mmio_addr, phy_id, reg);
/*
 * Read twice and return the second value: several MII status bits (e.g.
 * BMSR link status) are latched and only reflect current state on the
 * second read.
 */
408 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
410 	mdio_read(ioaddr, phy_id, reg);
411 	return mdio_read(ioaddr, phy_id, reg);
/*
 * Read one 16-bit word at EEPROM address @reg.
 *
 * Bails out early when ROMControl bit 1 reports no EEPROM present.  The
 * request is posted through ROMInterface and polled (bounded loop) until
 * the hardware clears EEREQ; the data word then sits in bits 31:16.
 * NOTE(review): the local declarations, delays and fallthrough return are
 * missing from this extract.
 */
414 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
419 	if (!(SIS_R32(ROMControl) & 0x0002))
422 	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
424 	for (i = 0; i < 200; i++) {
425 		if (!(SIS_R32(ROMInterface) & EEREQ)) {
426 			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
/* Mask every interrupt source, then acknowledge anything already pending. */
435 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
437 SIS_W32(IntrMask, 0x00);
438 SIS_W32(IntrStatus, 0xffffffff);
/*
 * Quiesce the chip: stop both DMA engines (0x1a00 keeps the configured
 * burst bits but drops the enable bits), then silence interrupts.
 */
442 static void sis190_asic_down(void __iomem *ioaddr)
444 /* Stop the chip's Tx and Rx DMA processes. */
446 SIS_W32(TxControl, 0x1a00);
447 SIS_W32(RxControl, 0x1a00);
449 sis190_irq_mask_and_ack(ioaddr);
/* Tag @desc as the final entry so the chip wraps back to the ring start. */
452 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
454 desc->size |= cpu_to_le32(RingEnd);
/*
 * Hand a descriptor (back) to the hardware: rewrite the buffer size
 * (preserving a possible RingEnd marker) and set OWNbit last so the chip
 * only sees a fully formed descriptor.
 */
457 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
459 u32 eor = le32_to_cpu(desc->size) & RingEnd;
462 desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
464 desc->status = cpu_to_le32(OWNbit | INTbit);
/* Install a freshly DMA-mapped buffer and give the descriptor to the chip. */
467 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
470 desc->addr = cpu_to_le32(mapping);
471 sis190_give_to_asic(desc, rx_buf_sz);
/*
 * Poison a descriptor so the hardware can never use it again (no OWNbit,
 * recognizable 0xdeadbeef address, size reduced to the RingEnd marker).
 */
474 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
477 desc->addr = cpu_to_le32(0xdeadbeef);
478 desc->size &= cpu_to_le32(RingEnd);
/*
 * Allocate and DMA-map one receive skb and attach it to @desc.  On
 * allocation/mapping failure the descriptor is poisoned instead so the
 * chip cannot DMA into a stale buffer.  Returns the skb or NULL.
 */
483 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
486 u32 rx_buf_sz = tp->rx_buf_sz;
489 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
493 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
495 sis190_map_to_asic(desc, mapping, rx_buf_sz);
/* Failure path: keep the ring consistent even without a buffer. */
497 sis190_make_unusable_by_asic(desc);
/*
 * Refill ring slots in [start, end) that currently lack an skb.
 * Returns the number of buffers actually allocated; the caller advances
 * dirty_rx by that amount.
 */
502 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
507 for (cur = start; cur < end; cur++) {
508 unsigned int i = cur % NUM_RX_DESC;
510 if (tp->Rx_skbuff[i])
513 tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
/* Stop on the first allocation failure; retry on a later pass. */
515 if (!tp->Rx_skbuff[i])
521 static bool sis190_try_rx_copy(struct sis190_private *tp,
522 struct sk_buff **sk_buff, int pkt_size,
528 if (pkt_size >= rx_copybreak)
531 skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
535 pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size,
538 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
/*
 * Classify a receive descriptor status word.  Returns 0 for a good frame
 * (CRC ok and no error bit set); otherwise bumps the matching error
 * counter and returns a negative value so the caller drops the packet.
 * NOTE(review): the tail of the function (counter increment epilogue and
 * return statements) is missing from this extract.
 */
545 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
547 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
549 if ((status & CRCOK) && !(status & ErrMask))
/* Exactly one counter per frame, most specific condition first. */
552 if (!(status & CRCOK))
553 stats->rx_crc_errors++;
554 else if (status & OVRUN)
555 stats->rx_over_errors++;
556 else if (status & (SHORT | LIMIT))
557 stats->rx_length_errors++;
558 else if (status & (MIIER | NIBON | COLON))
559 stats->rx_frame_errors++;
/*
 * Receive path, called from the interrupt handler.  Walks the Rx ring
 * from cur_rx, stopping at the first descriptor still owned by the chip,
 * then refills consumed slots.  Small packets go through the copy-break
 * path (sis190_try_rx_copy) so the DMA buffer can be re-armed in place;
 * large packets hand the buffer straight up the stack and leave the slot
 * empty for the refill pass.
 */
565 static int sis190_rx_interrupt(struct net_device *dev,
566 struct sis190_private *tp, void __iomem *ioaddr)
568 struct net_device_stats *stats = &dev->stats;
569 u32 rx_left, cur_rx = tp->cur_rx;
/* Number of filled descriptors not yet processed, capped by the quota. */
572 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
573 rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
575 for (; rx_left > 0; rx_left--, cur_rx++) {
576 unsigned int entry = cur_rx % NUM_RX_DESC;
577 struct RxDesc *desc = tp->RxDescRing + entry;
/* Chip still owns this descriptor: nothing more to reap. */
580 if (le32_to_cpu(desc->status) & OWNbit)
583 status = le32_to_cpu(desc->PSize);
585 // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
/* Bad frame: counters were updated, just recycle the buffer. */
588 if (sis190_rx_pkt_err(status, stats) < 0)
589 sis190_give_to_asic(desc, tp->rx_buf_sz);
591 struct sk_buff *skb = tp->Rx_skbuff[entry];
592 dma_addr_t addr = le32_to_cpu(desc->addr);
/* Hardware length includes the 4-byte FCS; strip it. */
593 int pkt_size = (status & RxSizeMask) - 4;
594 struct pci_dev *pdev = tp->pci_dev;
/* Multi-descriptor (fragmented) packets are not supported: drop. */
596 if (unlikely(pkt_size > tp->rx_buf_sz)) {
597 net_intr(tp, KERN_INFO
598 "%s: (frag) status = %08x.\n",
601 stats->rx_length_errors++;
602 sis190_give_to_asic(desc, tp->rx_buf_sz);
/* Copied out: hand the original buffer back to the chip. */
607 if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
608 pci_dma_sync_single_for_device(pdev, addr,
609 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
610 sis190_give_to_asic(desc, tp->rx_buf_sz);
/* Not copied: unmap and surrender the buffer to the stack. */
612 pci_unmap_single(pdev, addr, tp->rx_buf_sz,
614 tp->Rx_skbuff[entry] = NULL;
615 sis190_make_unusable_by_asic(desc);
618 skb_put(skb, pkt_size);
619 skb->protocol = eth_type_trans(skb, dev);
623 dev->last_rx = jiffies;
625 stats->rx_bytes += pkt_size;
626 if ((status & BCAST) == MCAST)
630 count = cur_rx - tp->cur_rx;
/* Replenish the slots consumed above. */
633 delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
634 if (!delta && count && netif_msg_intr(tp))
635 printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
636 tp->dirty_rx += delta;
/* Every buffer handed out and none replaced: receiver is starved. */
638 if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
639 printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
/*
 * Unmap a transmitted skb's DMA buffer and scrub the descriptor.  The
 * mapped length mirrors the padding applied at xmit time (runt frames
 * were padded to ETH_ZLEN).
 */
644 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
649 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
651 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
653 memset(desc, 0x00, sizeof(*desc));
/*
 * Transmit completion, called from the interrupt handler: reap every
 * descriptor the chip has released, free the skbs, and wake the queue if
 * it had been stopped because the ring was full.
 */
656 static void sis190_tx_interrupt(struct net_device *dev,
657 struct sis190_private *tp, void __iomem *ioaddr)
659 u32 pending, dirty_tx = tp->dirty_tx;
661 * It would not be needed if queueing was allowed to be enabled
662 * again too early (hint: think preempt and unclocked smp systems).
/* Remember whether the queue was stopped before we free anything. */
664 unsigned int queue_stopped;
667 pending = tp->cur_tx - dirty_tx;
668 queue_stopped = (pending == NUM_TX_DESC);
670 for (; pending; pending--, dirty_tx++) {
671 unsigned int entry = dirty_tx % NUM_TX_DESC;
672 struct TxDesc *txd = tp->TxDescRing + entry;
/* Chip still owns this one: stop reaping. */
675 if (le32_to_cpu(txd->status) & OWNbit)
678 skb = tp->Tx_skbuff[entry];
680 dev->stats.tx_packets++;
681 dev->stats.tx_bytes += skb->len;
683 sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
684 tp->Tx_skbuff[entry] = NULL;
685 dev_kfree_skb_irq(skb);
688 if (tp->dirty_tx != dirty_tx) {
689 tp->dirty_tx = dirty_tx;
692 netif_wake_queue(dev);
697 * The interrupt handler does all of the Rx thread work and cleans up after
/*
 * Top-level (possibly shared) interrupt handler.  Reads and acknowledges
 * IntrStatus, then dispatches to the link-change, Rx and Tx sub-handlers.
 */
700 static irqreturn_t sis190_interrupt(int irq, void *__dev)
702 struct net_device *dev = __dev;
703 struct sis190_private *tp = netdev_priv(dev);
704 void __iomem *ioaddr = tp->mmio_addr;
705 unsigned int handled = 0;
708 status = SIS_R32(IntrStatus);
/* 0xffffffff means the device is gone (hot-unplug); 0 means not ours. */
710 if ((status == 0xffffffff) || !status)
/* Interface went down while the IRQ was in flight: quiesce and leave. */
715 if (unlikely(!netif_running(dev))) {
716 sis190_asic_down(ioaddr);
/* Ack exactly the sources we observed. */
720 SIS_W32(IntrStatus, status);
722 // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
/* Link changes are handled in process context via the phy workqueue. */
724 if (status & LinkChange) {
725 net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
726 schedule_work(&tp->phy_task);
730 sis190_rx_interrupt(dev, tp, ioaddr);
732 if (status & TxQ0Int)
733 sis190_tx_interrupt(dev, tp, ioaddr);
735 return IRQ_RETVAL(handled);
738 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook (netconsole/kgdboe): run the interrupt handler with the
 * device IRQ disabled so it cannot race a real interrupt.
 */
739 static void sis190_netpoll(struct net_device *dev)
741 struct sis190_private *tp = netdev_priv(dev);
742 struct pci_dev *pdev = tp->pci_dev;
744 disable_irq(pdev->irq);
745 sis190_interrupt(pdev->irq, dev);
746 enable_irq(pdev->irq);
/* Unmap, free and poison a single Rx slot (skb pointer is cleared too). */
750 static void sis190_free_rx_skb(struct sis190_private *tp,
751 struct sk_buff **sk_buff, struct RxDesc *desc)
753 struct pci_dev *pdev = tp->pci_dev;
755 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
757 dev_kfree_skb(*sk_buff);
759 sis190_make_unusable_by_asic(desc);
/* Release every allocated Rx buffer (used on close and open-failure). */
762 static void sis190_rx_clear(struct sis190_private *tp)
766 for (i = 0; i < NUM_RX_DESC; i++) {
767 if (!tp->Rx_skbuff[i])
769 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
/* Reset all four ring cursors; rings are empty afterwards by definition. */
773 static void sis190_init_ring_indexes(struct sis190_private *tp)
775 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/*
 * Prepare both rings for use: reset indexes, clear the skb shadow arrays,
 * fully populate the Rx ring and mark its last descriptor for wrap.
 */
778 static int sis190_init_ring(struct net_device *dev)
780 struct sis190_private *tp = netdev_priv(dev);
782 sis190_init_ring_indexes(tp);
784 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
785 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
/* A partially filled Rx ring at startup is treated as a hard failure. */
787 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
790 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
/*
 * Program the receive filter: promiscuous, all-multicast, or a 64-bit
 * multicast hash (Ethernet CRC, 6 bits) plus broadcast/own-address.
 * Register writes are done under tp->lock to serialize with other
 * RxMacControl users.
 */
799 static void sis190_set_rx_mode(struct net_device *dev)
801 struct sis190_private *tp = netdev_priv(dev);
802 void __iomem *ioaddr = tp->mmio_addr;
804 u32 mc_filter[2]; /* Multicast hash filter */
807 if (dev->flags & IFF_PROMISC) {
809 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
811 mc_filter[1] = mc_filter[0] = 0xffffffff;
/* Hash table too small for the list, or ALLMULTI requested. */
812 } else if ((dev->mc_count > multicast_filter_limit) ||
813 (dev->flags & IFF_ALLMULTI)) {
814 /* Too many to filter perfectly -- accept all multicasts. */
815 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
816 mc_filter[1] = mc_filter[0] = 0xffffffff;
818 struct dev_mc_list *mclist;
821 rx_mode = AcceptBroadcast | AcceptMyPhys;
822 mc_filter[1] = mc_filter[0] = 0;
/* One hash bit per subscribed multicast address. */
823 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
824 i++, mclist = mclist->next) {
826 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
827 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
828 rx_mode |= AcceptMulticast;
832 spin_lock_irqsave(&tp->lock, flags);
834 SIS_W16(RxMacControl, rx_mode | 0x2);
835 SIS_W32(RxHashTable, mc_filter[0]);
836 SIS_W32(RxHashTable + 4, mc_filter[1]);
838 spin_unlock_irqrestore(&tp->lock, flags);
/* Pulse the soft-reset bit in IntrControl, then quiesce the chip. */
841 static void sis190_soft_reset(void __iomem *ioaddr)
843 SIS_W32(IntrControl, 0x8000);
845 SIS_W32(IntrControl, 0x0);
846 sis190_asic_down(ioaddr);
/*
 * Bring the MAC up: reset, point the chip at the DMA rings, clear all
 * filter/WOL state, program the Rx filter, unmask interrupts and finally
 * enable both DMA engines before waking the transmit queue.
 */
849 static void sis190_hw_start(struct net_device *dev)
851 struct sis190_private *tp = netdev_priv(dev);
852 void __iomem *ioaddr = tp->mmio_addr;
854 sis190_soft_reset(ioaddr);
856 SIS_W32(TxDescStartAddr, tp->tx_dma);
857 SIS_W32(RxDescStartAddr, tp->rx_dma);
859 SIS_W32(IntrStatus, 0xffffffff);
860 SIS_W32(IntrMask, 0x0);
861 SIS_W32(GMIIControl, 0x0);
862 SIS_W32(TxMacControl, 0x60);
863 SIS_W16(RxMacControl, 0x02);
864 SIS_W32(RxHashTable, 0x0);
866 SIS_W32(RxWolCtrl, 0x0);
867 SIS_W32(RxWolData, 0x0);
871 sis190_set_rx_mode(dev);
873 /* Enable all known interrupts by setting the interrupt mask. */
874 SIS_W32(IntrMask, sis190_intr_mask);
876 SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
877 SIS_W32(RxControl, 0x1a1d);
879 netif_start_queue(dev);
/*
 * PHY state machine, run from the shared workqueue (scheduled by the
 * interrupt handler on link change and by the periodic timer).  Handles
 * three cases: PHY still resetting (re-poll soon), autonegotiation not
 * complete (carrier off, re-poll later), or link up (decode the
 * negotiated mode, program StationControl and report carrier on).
 */
882 static void sis190_phy_task(struct work_struct *work)
884 struct sis190_private *tp =
885 container_of(work, struct sis190_private, phy_task);
886 struct net_device *dev = tp->dev;
887 void __iomem *ioaddr = tp->mmio_addr;
888 int phy_id = tp->mii_if.phy_id;
893 if (!netif_running(dev))
896 val = mdio_read(ioaddr, phy_id, MII_BMCR);
897 if (val & BMCR_RESET) {
898 // FIXME: needlessly high ? -- FR 02/07/2005
899 mod_timer(&tp->timer, jiffies + HZ/10);
900 } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
901 BMSR_ANEGCOMPLETE)) {
902 netif_carrier_off(dev);
903 net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
905 mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
/*
 * Table mapping link-partner ability bits to the StationControl value
 * for that speed/duplex; the catch-all entry defaults to 10/half.
 */
913 { LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
914 "1000 Mbps Full Duplex" },
915 { LPA_1000XHALF | LPA_SLCT, 0x07000c00,
916 "1000 Mbps Half Duplex" },
917 { LPA_100FULL, 0x04000800 | 0x00001000,
918 "100 Mbps Full Duplex" },
919 { LPA_100HALF, 0x04000800,
920 "100 Mbps Half Duplex" },
921 { LPA_10FULL, 0x04000400 | 0x00001000,
922 "10 Mbps Full Duplex" },
923 { LPA_10HALF, 0x04000400,
924 "10 Mbps Half Duplex" },
925 { 0, 0x04000400, "unknown" }
929 val = mdio_read(ioaddr, phy_id, 0x1f);
930 net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
932 val = mdio_read(ioaddr, phy_id, MII_LPA);
933 adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
934 net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
935 dev->name, val, adv);
/* First table entry whose bits are all present wins. */
939 for (p = reg31; p->val; p++) {
940 if ((val & p->val) == p->val)
/* Preserve the StationControl bits outside the speed/duplex field. */
944 p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
/* Broadcom BCM5461 on an RGMII board needs an extra Tx clock delay. */
946 if ((tp->features & F_HAS_RGMII) &&
947 (tp->features & F_PHY_BCM5461)) {
948 // Set Tx Delay in RGMII mode.
949 mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
951 mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
952 p->ctl |= 0x03000000;
955 SIS_W32(StationControl, p->ctl);
/* RGMII delay line: pulse then settle. */
957 if (tp->features & F_HAS_RGMII) {
958 SIS_W32(RGDelay, 0x0441);
959 SIS_W32(RGDelay, 0x0440);
962 net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
964 netif_carrier_on(dev);
/* Periodic PHY poll: defer the real work to the phy_task workqueue. */
971 static void sis190_phy_timer(unsigned long __opaque)
973 struct net_device *dev = (struct net_device *)__opaque;
974 struct sis190_private *tp = netdev_priv(dev);
976 if (likely(netif_running(dev)))
977 schedule_work(&tp->phy_task);
/* Synchronously stop the PHY poll timer (safe vs. a running handler). */
980 static inline void sis190_delete_timer(struct net_device *dev)
982 struct sis190_private *tp = netdev_priv(dev);
984 del_timer_sync(&tp->timer);
/* Arm the PHY poll timer for the first time (fires after PHY_TIMEOUT). */
987 static inline void sis190_request_timer(struct net_device *dev)
989 struct sis190_private *tp = netdev_priv(dev);
990 struct timer_list *timer = &tp->timer;
993 timer->expires = jiffies + SIS190_PHY_TIMEOUT;
994 timer->data = (unsigned long)dev;
995 timer->function = sis190_phy_timer;
/*
 * Derive the Rx buffer size from the MTU.  The descriptor size field
 * ignores the low 3 bits, so round any odd size down to an 8-byte
 * multiple (RX_BUF_MASK).
 */
999 static void sis190_set_rxbufsize(struct sis190_private *tp,
1000 struct net_device *dev)
1002 unsigned int mtu = dev->mtu;
1004 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1005 /* RxDesc->size has a licence to kill the lower bits */
1006 if (tp->rx_buf_sz & 0x07) {
1008 tp->rx_buf_sz &= RX_BUF_MASK;
/*
 * ndo_open: allocate the DMA rings, populate the Rx ring, start the PHY
 * timer, grab the (shared) IRQ and kick the hardware.  Uses goto-based
 * unwinding so every failure path releases what was acquired before it.
 */
1012 static int sis190_open(struct net_device *dev)
1014 struct sis190_private *tp = netdev_priv(dev);
1015 struct pci_dev *pdev = tp->pci_dev;
1018 sis190_set_rxbufsize(tp, dev);
1021 * Rx and Tx descriptors need 256 bytes alignment.
1022 * pci_alloc_consistent() guarantees a stronger alignment.
1024 tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1025 if (!tp->TxDescRing)
1028 tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1029 if (!tp->RxDescRing)
1032 rc = sis190_init_ring(dev);
1036 sis190_request_timer(dev);
1038 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
1040 goto err_release_timer_2;
1042 sis190_hw_start(dev);
/* Error unwinding, innermost acquisition first. */
1046 err_release_timer_2:
1047 sis190_delete_timer(dev);
1048 sis190_rx_clear(tp);
1050 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
1053 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
/*
 * Drop every queued-but-unsent skb; each one counts as tx_dropped.
 * Called with the chip quiesced (from down/timeout paths).
 */
1058 static void sis190_tx_clear(struct sis190_private *tp)
1062 for (i = 0; i < NUM_TX_DESC; i++) {
1063 struct sk_buff *skb = tp->Tx_skbuff[i];
1068 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1069 tp->Tx_skbuff[i] = NULL;
1072 tp->dev->stats.tx_dropped++;
1074 tp->cur_tx = tp->dirty_tx = 0;
/*
 * Stop the interface: kill the timer, stop the queue, shut the chip down
 * under the lock, then loop until no interrupt source remains unmasked
 * (guards against a concurrent handler re-enabling them), and finally
 * reclaim both rings.
 */
1077 static void sis190_down(struct net_device *dev)
1079 struct sis190_private *tp = netdev_priv(dev);
1080 void __iomem *ioaddr = tp->mmio_addr;
1081 unsigned int poll_locked = 0;
1083 sis190_delete_timer(dev);
1085 netif_stop_queue(dev);
1088 spin_lock_irq(&tp->lock);
1090 sis190_asic_down(ioaddr);
1092 spin_unlock_irq(&tp->lock);
1094 synchronize_irq(dev->irq);
1099 synchronize_sched();
1101 } while (SIS_R32(IntrMask));
1103 sis190_tx_clear(tp);
1104 sis190_rx_clear(tp);
/* ndo_stop: tear down in reverse order of sis190_open. */
1107 static int sis190_close(struct net_device *dev)
1109 struct sis190_private *tp = netdev_priv(dev);
1110 struct pci_dev *pdev = tp->pci_dev;
1114 free_irq(dev->irq, dev);
1116 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1117 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1119 tp->TxDescRing = NULL;
1120 tp->RxDescRing = NULL;
/*
 * ndo_start_xmit: pad runts to the minimum Ethernet frame, map the skb
 * for DMA, fill one Tx descriptor (OWNbit set last) and kick the Tx DMA
 * engine.  Stops the queue when the ring becomes full; the completion
 * handler wakes it again.
 */
1125 static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
1127 struct sis190_private *tp = netdev_priv(dev);
1128 void __iomem *ioaddr = tp->mmio_addr;
1129 u32 len, entry, dirty_tx;
1130 struct TxDesc *desc;
/* Hardware cannot pad; do it in software or drop on failure. */
1133 if (unlikely(skb->len < ETH_ZLEN)) {
1134 if (skb_padto(skb, ETH_ZLEN)) {
1135 dev->stats.tx_dropped++;
1143 entry = tp->cur_tx % NUM_TX_DESC;
1144 desc = tp->TxDescRing + entry;
/* Should be unreachable: the queue is stopped while the ring is full. */
1146 if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1147 netif_stop_queue(dev);
1148 net_tx_err(tp, KERN_ERR PFX
1149 "%s: BUG! Tx Ring full when queue awake!\n",
1151 return NETDEV_TX_BUSY;
1154 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1156 tp->Tx_skbuff[entry] = skb;
1158 desc->PSize = cpu_to_le32(len);
1159 desc->addr = cpu_to_le32(mapping);
1161 desc->size = cpu_to_le32(len);
1162 if (entry == (NUM_TX_DESC - 1))
1163 desc->size |= cpu_to_le32(RingEnd);
/* Transfer ownership to the chip only after the rest is in place. */
1167 desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1173 SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1175 dev->trans_start = jiffies;
1177 dirty_tx = tp->dirty_tx;
/* Ring just became full: stop, then re-check for a completion race. */
1178 if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1179 netif_stop_queue(dev);
1181 if (dirty_tx != tp->dirty_tx)
1182 netif_wake_queue(dev);
1185 return NETDEV_TX_OK;
/* Free every sis190_phy node hanging off @first_phy. */
1188 static void sis190_free_phy(struct list_head *first_phy)
1190 struct sis190_phy *cur, *next;
1192 list_for_each_entry_safe(cur, next, first_phy, list) {
1198 * sis190_default_phy - Select default PHY for sis190 mac.
1199 * @dev: the net device to probe for
1201 * Select first detected PHY with link as default.
1202 * If no one is link on, select PHY whose types is HOME as default.
1203 * If HOME doesn't exist, select LAN.
1205 static u16 sis190_default_phy(struct net_device *dev)
1207 struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1208 struct sis190_private *tp = netdev_priv(dev);
1209 struct mii_if_info *mii_if = &tp->mii_if;
1210 void __iomem *ioaddr = tp->mmio_addr;
1213 phy_home = phy_default = phy_lan = NULL;
1215 list_for_each_entry(phy, &tp->first_phy, list) {
1216 status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1218 // Link ON & Not select default PHY & not ghost PHY.
1219 if ((status & BMSR_LSTATUS) &&
1221 (phy->type != UNKNOWN)) {
/* Non-selected PHYs are isolated so they stay off the bus. */
1224 status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1225 mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1226 status | BMCR_ANENABLE | BMCR_ISOLATE);
1227 if (phy->type == HOME)
1229 else if (phy->type == LAN)
/* Fallback order: HOME, then LAN, then whatever is first in the list. */
1236 phy_default = phy_home;
1238 phy_default = phy_lan;
1240 phy_default = list_entry(&tp->first_phy,
1241 struct sis190_phy, list);
1244 if (mii_if->phy_id != phy_default->phy_id) {
1245 mii_if->phy_id = phy_default->phy_id;
1246 net_probe(tp, KERN_INFO
1247 "%s: Using transceiver at address %d as default.\n",
1248 pci_name(tp->pci_dev), mii_if->phy_id);
/* Un-isolate the chosen PHY and return its (latched) status. */
1251 status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1252 status &= (~BMCR_ISOLATE);
1254 mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1255 status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
/*
 * Identify one discovered PHY: read its OUI/model registers, match them
 * against mii_chip_table (mask the revision nibble of PHYSID2), record
 * its type and fold any chip-specific feature flags into tp->features.
 */
1260 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1261 struct sis190_phy *phy, unsigned int phy_id,
1264 void __iomem *ioaddr = tp->mmio_addr;
1265 struct mii_chip_info *p;
1267 INIT_LIST_HEAD(&phy->list);
1268 phy->status = mii_status;
1269 phy->phy_id = phy_id;
1271 phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1272 phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1274 for (p = mii_chip_table; p->type; p++) {
1275 if ((p->id[0] == phy->id[0]) &&
1276 (p->id[1] == (phy->id[1] & 0xfff0))) {
/* MIX parts are classified by 100 Mb capability: LAN vs HOME. */
1282 phy->type = (p->type == MIX) ?
1283 ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1284 LAN : HOME) : p->type;
1285 tp->features |= p->feature;
1287 phy->type = UNKNOWN;
1289 net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
1290 pci_name(tp->pci_dev),
1291 (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
/*
 * Marvell 88E1111 quirk: program the PHY's mode/control registers with
 * different values depending on whether the board wiring is RGMII.
 */
1294 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1296 if (tp->features & F_PHY_88E1111) {
1297 void __iomem *ioaddr = tp->mmio_addr;
1298 int phy_id = tp->mii_if.phy_id;
1304 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1306 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1308 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1314 * sis190_mii_probe - Probe MII PHY for sis190
1315 * @dev: the net device to probe for
1317 * Search for total of 32 possible mii phy addresses.
1318 * Identify and set current phy if found one,
1319 * return error if it failed to found.
1321 static int __devinit sis190_mii_probe(struct net_device *dev)
1323 struct sis190_private *tp = netdev_priv(dev);
1324 struct mii_if_info *mii_if = &tp->mii_if;
1325 void __iomem *ioaddr = tp->mmio_addr;
1329 INIT_LIST_HEAD(&tp->first_phy);
1331 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1332 struct sis190_phy *phy;
1335 status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1337 // Try next mii if the current one is not accessible.
1338 if (status == 0xffff || status == 0x0000)
1341 phy = kmalloc(sizeof(*phy), GFP_KERNEL);
/* OOM mid-scan: undo the PHYs registered so far. */
1343 sis190_free_phy(&tp->first_phy);
1348 sis190_init_phy(dev, tp, phy, phy_id, status);
1350 list_add(&tp->first_phy, &phy->list);
1353 if (list_empty(&tp->first_phy)) {
1354 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
1355 pci_name(tp->pci_dev));
1360 /* Select default PHY for mac */
1361 sis190_default_phy(dev);
1363 sis190_mii_probe_88e1111_fixup(tp);
/* Wire the generic mii_if_info helpers to our mdio accessors. */
1366 mii_if->mdio_read = __mdio_read;
1367 mii_if->mdio_write = __mdio_write;
1368 mii_if->phy_id_mask = PHY_ID_ANY;
1369 mii_if->reg_num_mask = MII_REG_ANY;
/* Release every PHY node discovered by sis190_mii_probe. */
1374 static void sis190_mii_remove(struct net_device *dev)
1376 struct sis190_private *tp = netdev_priv(dev);
1378 sis190_free_phy(&tp->first_phy);
/* Undo sis190_init_board: unmap MMIO and release the PCI device. */
1381 static void sis190_release_board(struct pci_dev *pdev)
1383 struct net_device *dev = pci_get_drvdata(pdev);
1384 struct sis190_private *tp = netdev_priv(dev);
1386 iounmap(tp->mmio_addr);
1387 pci_release_regions(pdev);
1388 pci_disable_device(pdev);
/*
 * Probe-time board bring-up: allocate the net_device, enable and claim
 * the PCI device, validate BAR 0 (must be MMIO and large enough), set the
 * 32-bit DMA mask, map the registers, then quiesce and reset the chip.
 * Numbered error labels unwind in reverse acquisition order.
 */
1392 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1394 struct sis190_private *tp;
1395 struct net_device *dev;
1396 void __iomem *ioaddr;
1399 dev = alloc_etherdev(sizeof(*tp));
1401 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
1406 SET_NETDEV_DEV(dev, &pdev->dev);
1408 tp = netdev_priv(dev);
1410 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1412 rc = pci_enable_device(pdev);
1414 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
1415 goto err_free_dev_1;
/* BAR 0 must be a memory resource of at least SIS190_REGS_SIZE. */
1420 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1421 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
1423 goto err_pci_disable_2;
1425 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1426 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
1428 goto err_pci_disable_2;
1431 rc = pci_request_regions(pdev, DRV_NAME);
1433 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
1435 goto err_pci_disable_2;
/* The chip only does 32-bit DMA addressing. */
1438 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1440 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
1442 goto err_free_res_3;
1445 pci_set_master(pdev);
1447 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1449 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
1452 goto err_free_res_3;
1456 tp->mmio_addr = ioaddr;
/* Leave the chip quiet until open(). */
1458 sis190_irq_mask_and_ack(ioaddr);
1460 sis190_soft_reset(ioaddr);
1465 pci_release_regions(pdev);
1467 pci_disable_device(pdev);
/*
 * ndo_tx_timeout: the Tx path wedged.  Disable the transmitter, mask
 * interrupts, drop every in-flight skb under the lock (a shared IRQ
 * handler could otherwise be reaping concurrently), then restart the
 * hardware from scratch and wake the queue.
 */
1475 static void sis190_tx_timeout(struct net_device *dev)
1477 struct sis190_private *tp = netdev_priv(dev);
1478 void __iomem *ioaddr = tp->mmio_addr;
1481 /* Disable Tx, if not already */
1482 tmp8 = SIS_R8(TxControl);
1483 if (tmp8 & CmdTxEnb)
1484 SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1487 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
1488 dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
1490 /* Disable interrupts by clearing the interrupt mask. */
1491 SIS_W32(IntrMask, 0x0000);
1493 /* Stop a shared interrupt from scavenging while we are. */
1494 spin_lock_irq(&tp->lock);
1495 sis190_tx_clear(tp);
1496 spin_unlock_irq(&tp->lock);
1498 /* ...and finally, reset everything. */
1499 sis190_hw_start(dev);
1501 netif_wake_queue(dev);
/* Record RGMII wiring: bit 7 of the EEPROM info byte selects RGMII. */
1504 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1506 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1509 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1510 struct net_device *dev)
1512 struct sis190_private *tp = netdev_priv(dev);
1513 void __iomem *ioaddr = tp->mmio_addr;
1517 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
1520 /* Check to see if there is a sane EEPROM */
1521 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1523 if ((sig == 0xffff) || (sig == 0x0000)) {
1524 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
1525 pci_name(pdev), sig);
1529 /* Get MAC address from EEPROM */
1530 for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
1531 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1533 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1536 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1542 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
 * @pdev: PCI device (used for probe-time log messages)
 * @dev: network device to get address for
1546 * SiS96x model, use APC CMOS RAM to store MAC address.
1547 * APC CMOS RAM is accessed through ISA bridge.
1548 * MAC address is read into @net_dev->dev_addr.
1550 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1551 struct net_device *dev)
1553 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1554 struct sis190_private *tp = netdev_priv(dev);
1555 struct pci_dev *isa_bridge;
1559 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1562 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1563 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1569 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1574 /* Enable port 78h & 79h to access APC Registers. */
1575 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1576 reg = (tmp8 & ~0x02);
1577 pci_write_config_byte(isa_bridge, 0x48, reg);
1579 pci_read_config_byte(isa_bridge, 0x48, ®);
1581 for (i = 0; i < MAC_ADDR_LEN; i++) {
1582 outb(0x9 + i, 0x78);
1583 dev->dev_addr[i] = inb(0x79);
1589 sis190_set_rgmii(tp, reg);
1591 /* Restore the value to ISA Bridge */
1592 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1593 pci_dev_put(isa_bridge);
1599 * sis190_init_rxfilter - Initialize the Rx filter
1600 * @dev: network device to initialize
1602 * Set receive filter address to our MAC address
1603 * and enable packet filtering.
1605 static inline void sis190_init_rxfilter(struct net_device *dev)
1607 struct sis190_private *tp = netdev_priv(dev);
1608 void __iomem *ioaddr = tp->mmio_addr;
1612 ctl = SIS_R16(RxMacControl);
1614 * Disable packet filtering before setting filter.
1615 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1616 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1618 SIS_W16(RxMacControl, ctl & ~0x0f00);
1620 for (i = 0; i < MAC_ADDR_LEN; i++)
1621 SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1623 SIS_W16(RxMacControl, ctl);
1627 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1628 struct net_device *dev)
1632 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1636 pci_read_config_byte(pdev, 0x73, ®);
1638 if (reg & 0x00000001)
1639 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1644 static void sis190_set_speed_auto(struct net_device *dev)
1646 struct sis190_private *tp = netdev_priv(dev);
1647 void __iomem *ioaddr = tp->mmio_addr;
1648 int phy_id = tp->mii_if.phy_id;
1651 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
1653 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1655 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1657 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1658 ADVERTISE_100FULL | ADVERTISE_10FULL |
1659 ADVERTISE_100HALF | ADVERTISE_10HALF);
1661 // Enable 1000 Full Mode.
1662 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1664 // Enable auto-negotiation and restart auto-negotiation.
1665 mdio_write(ioaddr, phy_id, MII_BMCR,
1666 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1669 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1671 struct sis190_private *tp = netdev_priv(dev);
1673 return mii_ethtool_gset(&tp->mii_if, cmd);
1676 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1678 struct sis190_private *tp = netdev_priv(dev);
1680 return mii_ethtool_sset(&tp->mii_if, cmd);
1683 static void sis190_get_drvinfo(struct net_device *dev,
1684 struct ethtool_drvinfo *info)
1686 struct sis190_private *tp = netdev_priv(dev);
1688 strcpy(info->driver, DRV_NAME);
1689 strcpy(info->version, DRV_VERSION);
1690 strcpy(info->bus_info, pci_name(tp->pci_dev));
1693 static int sis190_get_regs_len(struct net_device *dev)
1695 return SIS190_REGS_SIZE;
1698 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1701 struct sis190_private *tp = netdev_priv(dev);
1702 unsigned long flags;
1704 if (regs->len > SIS190_REGS_SIZE)
1705 regs->len = SIS190_REGS_SIZE;
1707 spin_lock_irqsave(&tp->lock, flags);
1708 memcpy_fromio(p, tp->mmio_addr, regs->len);
1709 spin_unlock_irqrestore(&tp->lock, flags);
1712 static int sis190_nway_reset(struct net_device *dev)
1714 struct sis190_private *tp = netdev_priv(dev);
1716 return mii_nway_restart(&tp->mii_if);
1719 static u32 sis190_get_msglevel(struct net_device *dev)
1721 struct sis190_private *tp = netdev_priv(dev);
1723 return tp->msg_enable;
1726 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1728 struct sis190_private *tp = netdev_priv(dev);
1730 tp->msg_enable = value;
1733 static const struct ethtool_ops sis190_ethtool_ops = {
1734 .get_settings = sis190_get_settings,
1735 .set_settings = sis190_set_settings,
1736 .get_drvinfo = sis190_get_drvinfo,
1737 .get_regs_len = sis190_get_regs_len,
1738 .get_regs = sis190_get_regs,
1739 .get_link = ethtool_op_get_link,
1740 .get_msglevel = sis190_get_msglevel,
1741 .set_msglevel = sis190_set_msglevel,
1742 .nway_reset = sis190_nway_reset,
1745 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1747 struct sis190_private *tp = netdev_priv(dev);
1749 return !netif_running(dev) ? -EINVAL :
1750 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1753 static int __devinit sis190_init_one(struct pci_dev *pdev,
1754 const struct pci_device_id *ent)
1756 static int printed_version = 0;
1757 struct sis190_private *tp;
1758 struct net_device *dev;
1759 void __iomem *ioaddr;
1761 DECLARE_MAC_BUF(mac);
1763 if (!printed_version) {
1764 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
1765 printed_version = 1;
1768 dev = sis190_init_board(pdev);
1774 pci_set_drvdata(pdev, dev);
1776 tp = netdev_priv(dev);
1777 ioaddr = tp->mmio_addr;
1779 rc = sis190_get_mac_addr(pdev, dev);
1781 goto err_release_board;
1783 sis190_init_rxfilter(dev);
1785 INIT_WORK(&tp->phy_task, sis190_phy_task);
1787 dev->open = sis190_open;
1788 dev->stop = sis190_close;
1789 dev->do_ioctl = sis190_ioctl;
1790 dev->tx_timeout = sis190_tx_timeout;
1791 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1792 dev->hard_start_xmit = sis190_start_xmit;
1793 #ifdef CONFIG_NET_POLL_CONTROLLER
1794 dev->poll_controller = sis190_netpoll;
1796 dev->set_multicast_list = sis190_set_rx_mode;
1797 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1798 dev->irq = pdev->irq;
1799 dev->base_addr = (unsigned long) 0xdead;
1801 spin_lock_init(&tp->lock);
1803 rc = sis190_mii_probe(dev);
1805 goto err_release_board;
1807 rc = register_netdev(dev);
1809 goto err_remove_mii;
1811 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
1813 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1814 ioaddr, dev->irq, print_mac(mac, dev->dev_addr));
1816 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1817 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1819 netif_carrier_off(dev);
1821 sis190_set_speed_auto(dev);
1826 sis190_mii_remove(dev);
1828 sis190_release_board(pdev);
1832 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1834 struct net_device *dev = pci_get_drvdata(pdev);
1836 sis190_mii_remove(dev);
1837 flush_scheduled_work();
1838 unregister_netdev(dev);
1839 sis190_release_board(pdev);
1840 pci_set_drvdata(pdev, NULL);
1843 static struct pci_driver sis190_pci_driver = {
1845 .id_table = sis190_pci_tbl,
1846 .probe = sis190_init_one,
1847 .remove = __devexit_p(sis190_remove_one),
1850 static int __init sis190_init_module(void)
1852 return pci_register_driver(&sis190_pci_driver);
1855 static void __exit sis190_cleanup_module(void)
1857 pci_unregister_driver(&sis190_pci_driver);
/* Hook the entry/exit routines into the module loader. */
module_init(sis190_init_module);
module_exit(sis190_cleanup_module);