2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 DAVICOM Web-Site: www.davicom.com.tw
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
26 Alan Cox <alan@redhat.com> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
52 Alan Cox <alan@redhat.com>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
58 Implement pci_driver::suspend() and pci_driver::resume()
59 power management methods.
61 Check on 64 bit boxes.
62 Check and fix on big endian boxes.
64 Test and make sure PCI latency is now correct for all cases.
67 #define DRV_NAME "dmfe"
68 #define DRV_VERSION "1.36.4"
69 #define DRV_RELDATE "2002-01-17"
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/timer.h>
75 #include <linux/ptrace.h>
76 #include <linux/errno.h>
77 #include <linux/ioport.h>
78 #include <linux/slab.h>
79 #include <linux/interrupt.h>
80 #include <linux/pci.h>
81 #include <linux/dma-mapping.h>
82 #include <linux/init.h>
83 #include <linux/netdevice.h>
84 #include <linux/etherdevice.h>
85 #include <linux/ethtool.h>
86 #include <linux/skbuff.h>
87 #include <linux/delay.h>
88 #include <linux/spinlock.h>
89 #include <linux/crc32.h>
90 #include <linux/bitops.h>
92 #include <asm/processor.h>
95 #include <asm/uaccess.h>
99 /* Board/System/Debug information/definition ---------------- */
100 #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
101 #define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
102 #define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
103 #define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
105 #define DM9102_IO_SIZE 0x80
106 #define DM9102A_IO_SIZE 0x100
107 #define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */
108 #define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
109 #define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
110 #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
111 #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
112 #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
113 #define TX_BUF_ALLOC 0x600
114 #define RX_ALLOC_SIZE 0x620
115 #define DM910X_RESET 1
116 #define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
117 #define CR6_DEFAULT 0x00080000 /* HD */
118 #define CR7_DEFAULT 0x180c1
119 #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
120 #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
121 #define MAX_PACKET_SIZE 1514
122 #define DMFE_MAX_MULTICAST 14
123 #define RX_COPY_SIZE 100
124 #define MAX_CHECK_PACKET 0x8000
125 #define DM9801_NOISE_FLOOR 8
126 #define DM9802_NOISE_FLOOR 5
129 #define DMFE_100MHF 1
131 #define DMFE_100MFD 5
133 #define DMFE_1M_HPNA 0x10
135 #define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
136 #define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
137 #define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
138 #define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
139 #define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
140 #define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
142 #define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
143 #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
144 #define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
146 #define DMFE_DBUG(dbug_now, msg, value) \
148 if (dmfe_debug || (dbug_now)) \
149 printk(KERN_ERR DRV_NAME ": %s %lx\n",\
150 (msg), (long) (value)); \
153 #define SHOW_MEDIA_TYPE(mode) \
154 printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
155 (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
158 /* CR9 definition: SROM/MII */
159 #define CR9_SROM_READ 0x4800
161 #define CR9_SRCLK 0x2
162 #define CR9_CRDOUT 0x8
163 #define SROM_DATA_0 0x0
164 #define SROM_DATA_1 0x4
165 #define PHY_DATA_1 0x20000
166 #define PHY_DATA_0 0x00000
167 #define MDCLKH 0x10000
169 #define PHY_POWER_DOWN 0x800
171 #define SROM_V41_CODE 0x14
173 #define SROM_CLK_WRITE(data, ioaddr) \
174 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
176 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
178 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
181 #define __CHK_IO_SIZE(pci_id, dev_rev) \
182 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
183 DM9102A_IO_SIZE: DM9102_IO_SIZE)
185 #define CHK_IO_SIZE(pci_dev, dev_rev) \
186 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
189 #define DEVICE net_device
191 /* Structure/enum declaration ------------------------------- */
/* Hardware Tx/Rx descriptor layouts. The tdes0..3 / rdes0..3 words are
 * read and written by the DM910x chip (stored little-endian, hence the
 * cpu_to_le32/le32_to_cpu conversions elsewhere); the trailing pointer
 * fields are driver-private chaining data the hardware never sees.
 * NOTE(review): the "struct tx_desc {" / "struct rx_desc {" opening
 * lines are not visible in this listing — confirm against full source. */
193 u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
194 char *tx_buf_ptr; /* Data for us */
195 struct tx_desc *next_tx_desc;
196 } __attribute__(( aligned(32) ));
199 u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
200 struct sk_buff *rx_skb_ptr; /* Data for us */
201 struct rx_desc *next_rx_desc;
202 } __attribute__(( aligned(32) ));
/* Per-adapter private state, stored in the net_device private area
 * (see netdev_priv() uses throughout this file). Holds DMA pool
 * pointers, descriptor ring cursors, media/PHY state and statistics.
 * NOTE(review): several fields referenced elsewhere in this file
 * (cr0_data, cr5_data, cr6_data, cr7_data, cr15_data, phy_addr, lock)
 * fall in lines missing from this listing. */
204 struct dmfe_board_info {
205 u32 chip_id; /* Chip vendor/Device ID */
206 u32 chip_revision; /* Chip revision */
207 struct DEVICE *next_dev; /* next device */
208 struct pci_dev *pdev; /* PCI device */
211 long ioaddr; /* I/O base address */
218 /* pointer for memory physical address */
219 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
220 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
221 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
222 dma_addr_t first_tx_desc_dma;
223 dma_addr_t first_rx_desc_dma;
225 /* descriptor pointer */
226 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
227 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
228 unsigned char *desc_pool_ptr; /* descriptor pool memory */
229 struct tx_desc *first_tx_desc;
230 struct tx_desc *tx_insert_ptr;
231 struct tx_desc *tx_remove_ptr;
232 struct rx_desc *first_rx_desc;
233 struct rx_desc *rx_insert_ptr;
234 struct rx_desc *rx_ready_ptr; /* packet come pointer */
235 unsigned long tx_packet_cnt; /* transmitted packet count */
236 unsigned long tx_queue_cnt; /* wait to send packet count */
237 unsigned long rx_avail_cnt; /* available rx descriptor count */
238 unsigned long interval_rx_cnt; /* rx packet count a callback time */
240 u16 HPNA_command; /* For HPNA register 16 */
241 u16 HPNA_timer; /* For HPNA remote device check */
243 u16 NIC_capability; /* NIC media capability */
244 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
246 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
247 u8 chip_type; /* Keep DM9102A chip type */
248 u8 media_mode; /* user specify media mode */
249 u8 op_mode; /* real work media mode */
251 u8 link_failed; /* Ever link failed */
252 u8 wait_reset; /* Hardware failed, need to reset */
253 u8 dm910x_chk_mode; /* Operating mode check */
254 u8 first_in_callback; /* Flag to record state */
255 struct timer_list timer;
257 /* System defined statistic counter */
258 struct net_device_stats stats;
260 /* Driver defined statistic counter */
261 unsigned long tx_fifo_underrun;
262 unsigned long tx_loss_carrier;
263 unsigned long tx_no_carrier;
264 unsigned long tx_late_collision;
265 unsigned long tx_excessive_collision;
266 unsigned long tx_jabber_timeout;
267 unsigned long reset_count;
268 unsigned long reset_cr8;
269 unsigned long reset_fatal;
270 unsigned long reset_TXtimeout;
273 unsigned char srom[128]; /* raw SROM contents; MAC address at offset 20 */
277 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
278 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
279 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
284 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
285 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
286 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
289 /* Global variable declaration ----------------------------- */
290 static int __devinitdata printed_version;
291 static char version[] __devinitdata =
292 KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
293 DRV_VERSION " (" DRV_RELDATE ")\n";
295 static int dmfe_debug;
296 static unsigned char dmfe_media_mode = DMFE_AUTO;
297 static u32 dmfe_cr6_user_set;
299 /* For module input parameter */
302 static unsigned char mode = 8;
303 static u8 chkmode = 1;
304 static u8 HPNA_mode; /* Default: Low Power/High Speed */
305 static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
306 static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
307 static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
308 static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
309 4: TX pause packet */
312 /* function declaration ------------------------------------- */
313 static int dmfe_open(struct DEVICE *);
314 static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
315 static int dmfe_stop(struct DEVICE *);
316 static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
317 static void dmfe_set_filter_mode(struct DEVICE *);
318 static const struct ethtool_ops netdev_ethtool_ops;
319 static u16 read_srom_word(long ,int);
320 static irqreturn_t dmfe_interrupt(int , void *);
321 #ifdef CONFIG_NET_POLL_CONTROLLER
322 static void poll_dmfe (struct net_device *dev);
324 static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
325 static void allocate_rx_buffer(struct dmfe_board_info *);
326 static void update_cr6(u32, unsigned long);
327 static void send_filter_frame(struct DEVICE * ,int);
328 static void dm9132_id_table(struct DEVICE * ,int);
329 static u16 phy_read(unsigned long, u8, u8, u32);
330 static void phy_write(unsigned long, u8, u8, u16, u32);
331 static void phy_write_1bit(unsigned long, u32);
332 static u16 phy_read_1bit(unsigned long);
333 static u8 dmfe_sense_speed(struct dmfe_board_info *);
334 static void dmfe_process_mode(struct dmfe_board_info *);
335 static void dmfe_timer(unsigned long);
336 static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
337 static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
338 static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
339 static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
340 static void dmfe_dynamic_reset(struct DEVICE *);
341 static void dmfe_free_rxbuffer(struct dmfe_board_info *);
342 static void dmfe_init_dm910x(struct DEVICE *);
343 static void dmfe_parse_srom(struct dmfe_board_info *);
344 static void dmfe_program_DM9801(struct dmfe_board_info *, int);
345 static void dmfe_program_DM9802(struct dmfe_board_info *);
346 static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
347 static void dmfe_set_phyxcer(struct dmfe_board_info *);
349 /* DM910X network board routine ---------------------------- */
352 * Search DM910X board ,allocate space and register it
/*
 * PCI probe callback: set up one DM910x adapter.
 *
 * Allocates the net_device (with dmfe_board_info as private data),
 * enables the PCI device, validates the I/O region size against the
 * chip revision, allocates the Tx/Rx descriptor pool and Tx buffer
 * pool with pci_alloc_consistent(), wires up the net_device method
 * pointers, reads the 64-word SROM to obtain the MAC address
 * (bytes 20..25), and registers the netdev.
 *
 * NOTE(review): this listing has gaps — the early-return error paths,
 * the err labels and the final return statements are not visible here.
 */
355 static int __devinit dmfe_init_one (struct pci_dev *pdev,
356 const struct pci_device_id *ent)
358 struct dmfe_board_info *db; /* board information structure */
359 struct net_device *dev;
360 u32 dev_rev, pci_pmr;
363 DMFE_DBUG(0, "dmfe_init_one()", 0);
365 if (!printed_version++)
368 /* Init network device */
369 dev = alloc_etherdev(sizeof(*db));
372 SET_MODULE_OWNER(dev);
373 SET_NETDEV_DEV(dev, &pdev->dev);
375 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
376 printk(KERN_WARNING DRV_NAME
377 ": 32-bit PCI DMA not available.\n");
382 /* Enable Master/IO access, Disable memory access */
383 err = pci_enable_device(pdev);
387 if (!pci_resource_start(pdev, 0)) {
388 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
390 goto err_out_disable;
393 /* Read Chip revision */
394 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
/* DM9132 and newer DM9102A revisions decode 0x100 bytes of I/O space;
 * older chips only 0x80 — CHK_IO_SIZE picks the required minimum. */
396 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
397 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
399 goto err_out_disable;
402 #if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
404 /* Set Latency Timer 80h */
405 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
406 Need a PCI quirk.. */
408 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
411 if (pci_request_regions(pdev, DRV_NAME)) {
412 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
414 goto err_out_disable;
417 /* Init system & device */
418 db = netdev_priv(dev);
420 /* Allocate Tx/Rx descriptor memory */
/* extra 0x20 / 4 bytes allow dword/cache-line alignment of the pools */
421 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
422 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
424 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
425 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
427 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
428 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
429 db->buf_pool_start = db->buf_pool_ptr;
430 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
432 db->chip_id = ent->driver_data;
433 db->ioaddr = pci_resource_start(pdev, 0);
434 db->chip_revision = dev_rev;
438 dev->base_addr = db->ioaddr;
439 dev->irq = pdev->irq;
440 pci_set_drvdata(pdev, dev);
441 dev->open = &dmfe_open;
442 dev->hard_start_xmit = &dmfe_start_xmit;
443 dev->stop = &dmfe_stop;
444 dev->get_stats = &dmfe_get_stats;
445 dev->set_multicast_list = &dmfe_set_filter_mode;
446 #ifdef CONFIG_NET_POLL_CONTROLLER
447 dev->poll_controller = &poll_dmfe;
449 dev->ethtool_ops = &netdev_ethtool_ops;
450 spin_lock_init(&db->lock);
452 pci_read_config_dword(pdev, 0x50, &pci_pmr);
/* detect the DM9102A E3 stepping via PCI power-management register */
454 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
455 db->chip_type = 1; /* DM9102A E3 */
459 /* read 64 word srom data */
460 for (i = 0; i < 64; i++)
461 ((u16 *) db->srom)[i] =
462 cpu_to_le16(read_srom_word(db->ioaddr, i));
464 /* Set Node address (MAC lives at SROM byte offset 20) */
465 for (i = 0; i < 6; i++)
466 dev->dev_addr[i] = db->srom[20 + i];
468 err = register_netdev (dev);
472 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
474 ent->driver_data >> 16,
476 for (i = 0; i < 6; i++)
477 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
478 printk(", irq %d.\n", dev->irq);
480 pci_set_master(pdev);
/* error-unwind labels follow (lines elided in this listing) */
485 pci_release_regions(pdev);
487 pci_disable_device(pdev);
489 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove callback: tear down one adapter.
 * Unregisters the netdev, frees the coherent DMA pools allocated in
 * dmfe_init_one() (same size expressions, so alloc/free stay paired),
 * releases the PCI regions and frees the net_device.
 */
496 static void __devexit dmfe_remove_one (struct pci_dev *pdev)
498 struct net_device *dev = pci_get_drvdata(pdev);
499 struct dmfe_board_info *db = netdev_priv(dev);
501 DMFE_DBUG(0, "dmfe_remove_one()", 0);
505 unregister_netdev(dev);
507 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
508 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
509 db->desc_pool_dma_ptr);
510 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
511 db->buf_pool_ptr, db->buf_pool_dma_ptr);
512 pci_release_regions(pdev);
513 free_netdev(dev); /* free board information */
515 pci_set_drvdata(pdev, NULL);
518 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
523 * Open the interface.
524 * The interface is opened whenever "ifconfig" actives it.
/*
 * net_device->open: bring the interface up.
 * Requests the (shared) IRQ, resets the software state and counters,
 * chooses the CR6 operating mode (store&forward "check mode" for old
 * DM9102 silicon, normal threshold mode for DM9132/new DM9102A),
 * initializes the hardware and starts the 1-second maintenance timer.
 */
527 static int dmfe_open(struct DEVICE *dev)
530 struct dmfe_board_info *db = netdev_priv(dev);
532 DMFE_DBUG(0, "dmfe_open", 0);
534 ret = request_irq(dev->irq, &dmfe_interrupt,
535 IRQF_SHARED, dev->name, dev);
539 /* system variable init */
540 db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
541 db->tx_packet_cnt = 0;
542 db->tx_queue_cnt = 0;
543 db->rx_avail_cnt = 0;
547 db->first_in_callback = 0;
548 db->NIC_capability = 0xf; /* All capability*/
549 db->PHY_reg4 = 0x1e0;
551 /* CR6 operation mode decision */
552 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
553 (db->chip_revision >= 0x02000030) ) {
554 db->cr6_data |= DMFE_TXTH_256;
555 db->cr0_data = CR0_DEFAULT;
556 db->dm910x_chk_mode=4; /* Enter the normal mode */
558 db->cr6_data |= CR6_SFT; /* Store & Forward mode */
560 db->dm910x_chk_mode = 1; /* Enter the check mode */
563 /* Initialize DM910X board */
564 dmfe_init_dm910x(dev);
566 /* Active System Interface */
567 netif_wake_queue(dev);
569 /* set and activate a timer process; first expiry is delayed an
570    extra 2 s to let the link settle after reset */
570 init_timer(&db->timer);
571 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
572 db->timer.data = (unsigned long)dev;
573 db->timer.function = &dmfe_timer;
574 add_timer(&db->timer);
580 /* Initialize DM910X board
582 * Initialize TX/Rx descriptor chain structure
583 * Send the set-up frame
584 * Enable Tx/Rx machine
*/
587 static void dmfe_init_dm910x(struct DEVICE *dev)
589 struct dmfe_board_info *db = netdev_priv(dev);
590 unsigned long ioaddr = db->ioaddr;
592 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
594 /* Reset DM910x MAC controller */
595 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
597 outl(db->cr0_data, ioaddr + DCR0);
600 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
603 /* Parse SROM and media mode */
605 db->media_mode = dmfe_media_mode;
607 /* RESET Phyxcer Chip by GPR port bit 7 */
608 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
609 if (db->chip_id == PCI_DM9009_ID) {
610 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
611 mdelay(300); /* Delay 300 ms */
613 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
615 /* Process Phyxcer Media Mode */
616 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
617 dmfe_set_phyxcer(db);
619 /* Media Mode Process */
620 if ( !(db->media_mode & DMFE_AUTO) )
621 db->op_mode = db->media_mode; /* Force Mode */
623 /* Initialize Transmit/Receive descriptor and CR3/4 */
624 dmfe_descriptor_init(db, ioaddr);
626 /* Init CR6 to program DM910x operation */
627 update_cr6(db->cr6_data, ioaddr);
629 /* Send setup frame: DM9132 uses a hash/ID register table,
630    DM9102/DM9102A a Tx setup frame */
630 if (db->chip_id == PCI_DM9132_ID)
631 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
633 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
635 /* Init CR7, interrupt active bit */
636 db->cr7_data = CR7_DEFAULT;
637 outl(db->cr7_data, ioaddr + DCR7);
639 /* Init CR15, Tx jabber and Rx watchdog timer */
640 outl(db->cr15_data, ioaddr + DCR15);
642 /* Enable DM910X Tx/Rx function */
643 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
644 update_cr6(db->cr6_data, ioaddr);
649 * Hardware start transmission.
650 * Send a packet to media from the upper layer.
/*
 * net_device->hard_start_xmit: queue one packet for transmission.
 * Copies the skb payload into the pre-allocated Tx buffer of the next
 * free descriptor (this driver always copies; no zero-copy mapping),
 * then either hands the descriptor to the chip immediately or leaves
 * it queued for dmfe_free_tx_pkt() to kick later. Interrupts are
 * masked via CR7 around the descriptor update, under db->lock.
 * NOTE(review): lines freeing the skb and returning are not visible
 * in this listing.
 */
653 static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
655 struct dmfe_board_info *db = netdev_priv(dev);
656 struct tx_desc *txptr;
659 DMFE_DBUG(0, "dmfe_start_xmit", 0);
661 /* Resource flag check */
662 netif_stop_queue(dev);
664 /* Too large packet check */
665 if (skb->len > MAX_PACKET_SIZE) {
666 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
671 spin_lock_irqsave(&db->lock, flags);
673 /* No Tx resource check, it should never happen normally */
674 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
675 spin_unlock_irqrestore(&db->lock, flags);
676 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
681 /* Disable NIC interrupt */
682 outl(0, dev->base_addr + DCR7);
684 /* transmit this packet */
685 txptr = db->tx_insert_ptr;
686 memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
687 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
689 /* Point to next transmit free descriptor */
690 db->tx_insert_ptr = txptr->next_tx_desc;
692 /* Transmit Packet Process */
693 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
694 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
695 db->tx_packet_cnt++; /* Ready to send */
696 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
697 dev->trans_start = jiffies; /* saved time stamp */
699 db->tx_queue_cnt++; /* queue TX packet */
700 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
703 /* Tx resource check */
704 if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
705 netif_wake_queue(dev);
707 /* Restore CR7 to enable interrupt */
708 spin_unlock_irqrestore(&db->lock, flags);
709 outl(db->cr7_data, dev->base_addr + DCR7);
719 * Stop the interface.
720 * The interface is stopped when it is brought down.
/*
 * net_device->stop: bring the interface down.
 * Stops the queue and timer, resets the MAC, powers down the PHY
 * (BMCR bit 0x8000 = reset), releases the IRQ and frees all Rx skbs.
 * The final printk dumps the driver's private error counters.
 */
723 static int dmfe_stop(struct DEVICE *dev)
725 struct dmfe_board_info *db = netdev_priv(dev);
726 unsigned long ioaddr = dev->base_addr;
728 DMFE_DBUG(0, "dmfe_stop", 0);
731 netif_stop_queue(dev);
734 del_timer_sync(&db->timer);
736 /* Reset & stop DM910X board */
737 outl(DM910X_RESET, ioaddr + DCR0);
739 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
742 free_irq(dev->irq, dev);
744 /* free allocated rx buffer */
745 dmfe_free_rxbuffer(db);
748 /* show statistic counter */
749 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
750 " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
751 db->tx_fifo_underrun, db->tx_excessive_collision,
752 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
753 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
754 db->reset_fatal, db->reset_TXtimeout);
762 * DM9102 interrupt handler
763 * receive the packet to upper layer, free the transmitted packet
/*
 * Interrupt handler.
 * Reads and acknowledges CR5 (write-back clears the status bits),
 * bails out if none of the interesting bits (0xc1: Rx/Tx/abnormal)
 * are set, masks CR7 while servicing Rx (bit 0x40) and Tx-done
 * (bit 0x01), flags a pending reset on system/bus error (bit 0x2000),
 * and refills the Rx ring when it runs low. All under db->lock.
 */
766 static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
768 struct DEVICE *dev = dev_id;
769 struct dmfe_board_info *db = netdev_priv(dev);
770 unsigned long ioaddr = dev->base_addr;
773 DMFE_DBUG(0, "dmfe_interrupt()", 0);
775 spin_lock_irqsave(&db->lock, flags);
777 /* Got DM910X status */
778 db->cr5_data = inl(ioaddr + DCR5);
779 outl(db->cr5_data, ioaddr + DCR5); /* write-1-to-clear the status */
780 if ( !(db->cr5_data & 0xc1) ) {
781 spin_unlock_irqrestore(&db->lock, flags);
785 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
786 outl(0, ioaddr + DCR7);
788 /* Check system status */
789 if (db->cr5_data & 0x2000) {
790 /* system bus error happen */
791 DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
793 db->wait_reset = 1; /* Need to RESET */
794 spin_unlock_irqrestore(&db->lock, flags);
798 /* Received the coming packet */
799 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
800 dmfe_rx_packet(dev, db);
802 /* reallocate rx descriptor buffer */
803 if (db->rx_avail_cnt<RX_DESC_CNT)
804 allocate_rx_buffer(db);
806 /* Free the transmitted descriptor */
807 if ( db->cr5_data & 0x01)
808 dmfe_free_tx_pkt(dev, db);
/* leave "check mode" once triggered: re-enable full Rx (CR6 bit 0x100) */
811 if (db->dm910x_chk_mode & 0x2) {
812 db->dm910x_chk_mode = 0x4;
813 db->cr6_data |= 0x100;
814 update_cr6(db->cr6_data, db->ioaddr);
817 /* Restore CR7 to enable interrupt mask */
818 outl(db->cr7_data, ioaddr + DCR7);
820 spin_unlock_irqrestore(&db->lock, flags);
825 #ifdef CONFIG_NET_POLL_CONTROLLER
827 * Polling 'interrupt' - used by things like netconsole to send skbs
828 * without having to re-enable interrupts. It's not called while
829 * the interrupt routine is executing.
/* Netpoll hook: run the interrupt handler with the IRQ line disabled,
 * since it may be called from contexts where interrupts can't fire. */
832 static void poll_dmfe (struct net_device *dev)
834 /* disable_irq here is not very nice, but with the lockless
835 interrupt handler we have no other choice. */
836 disable_irq(dev->irq);
837 dmfe_interrupt (dev->irq, dev);
838 enable_irq(dev->irq);
843 * Free TX resource after TX complete
/*
 * Reclaim completed Tx descriptors (called from the ISR on Tx-done).
 * Walks from tx_remove_ptr while the chip has cleared the owner bit
 * (tdes0 bit 31), accumulates error statistics from tdes0, then kicks
 * the next queued packet if any, and wakes the netif queue when
 * enough descriptors are free. Caller holds db->lock.
 */
846 static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
848 struct tx_desc *txptr;
849 unsigned long ioaddr = dev->base_addr;
852 txptr = db->tx_remove_ptr;
853 while(db->tx_packet_cnt) {
854 tdes0 = le32_to_cpu(txptr->tdes0);
855 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
856 if (tdes0 & 0x80000000) /* still owned by the chip: stop */
859 /* A packet sent completed */
861 db->stats.tx_packets++;
863 /* Transmit statistic counter */
864 if ( tdes0 != 0x7fffffff ) {
865 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
866 db->stats.collisions += (tdes0 >> 3) & 0xf;
867 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
868 if (tdes0 & TDES0_ERR_MASK) {
869 db->stats.tx_errors++;
871 if (tdes0 & 0x0002) { /* UnderRun */
872 db->tx_fifo_underrun++;
/* on FIFO underrun, fall back to store-and-forward mode */
873 if ( !(db->cr6_data & CR6_SFT) ) {
874 db->cr6_data = db->cr6_data | CR6_SFT;
875 update_cr6(db->cr6_data, db->ioaddr);
879 db->tx_excessive_collision++;
881 db->tx_late_collision++;
885 db->tx_loss_carrier++;
887 db->tx_jabber_timeout++;
891 txptr = txptr->next_tx_desc;
894 /* Update TX remove pointer to next */
895 db->tx_remove_ptr = txptr;
897 /* Send the Tx packet in queue */
898 if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
899 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
900 db->tx_packet_cnt++; /* Ready to send */
902 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
903 dev->trans_start = jiffies; /* saved time stamp */
906 /* Resource available check */
907 if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
908 netif_wake_queue(dev); /* Active upper layer, send again */
913 * Calculate the CRC value of the Rx packet
914 * flag = 1 : return the reverse CRC (for the received packet CRC)
915 * 0 : return the normal CRC (for Hash Table index)
/* CRC-32 over Data[0..Len); flag != 0 returns the bit-inverted CRC
 * (matches the FCS appended to received frames), flag == 0 the plain
 * CRC used for multicast hash-table indexing. */
918 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
920 u32 crc = crc32(~0, Data, Len);
921 if (flag) crc = ~crc;
927 * Receive the come packet and pass to upper layer
/*
 * Receive path (called from the ISR while holding db->lock).
 * Walks the Rx ring from rx_ready_ptr while descriptors have been
 * released by the chip (rdes0 bit 31 clear): unmaps the DMA buffer,
 * validates first/last-segment and error-summary bits, optionally
 * verifies the frame CRC in software (dm910x_chk_mode & 1), copies
 * short frames (< RX_COPY_SIZE) into a fresh skb so the large ring
 * buffer can be reused, and passes good frames up the stack.
 * Bad/partial frames recycle their skb via dmfe_reuse_skb().
 */
930 static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
932 struct rx_desc *rxptr;
933 struct sk_buff *skb, *newskb;
937 rxptr = db->rx_ready_ptr;
939 while(db->rx_avail_cnt) {
940 rdes0 = le32_to_cpu(rxptr->rdes0);
941 if (rdes0 & 0x80000000) /* packet owner check */
945 db->interval_rx_cnt++;
947 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
948 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
950 if ( (rdes0 & 0x300) != 0x300) {
951 /* A packet without First/Last flag */
953 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
954 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
956 /* A packet with First/Last flag */
957 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4; /* drop 4-byte FCS */
959 /* error summary bit check */
960 if (rdes0 & 0x8000) {
961 /* This is a error packet */
962 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
963 db->stats.rx_errors++;
965 db->stats.rx_fifo_errors++;
967 db->stats.rx_crc_errors++;
969 db->stats.rx_length_errors++;
/* accept error frames only in promiscuous mode (CR6_PM) */
972 if ( !(rdes0 & 0x8000) ||
973 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
974 skb = rxptr->rx_skb_ptr;
976 /* Received Packet CRC check need or not */
977 if ( (db->dm910x_chk_mode & 1) &&
978 (cal_CRC(skb->data, rxlen, 1) !=
979 (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
980 /* Found a error received packet */
981 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
982 db->dm910x_chk_mode = 3;
984 /* Good packet, send to upper layer */
985 /* Short packet uses a new SKB */
986 if ((rxlen < RX_COPY_SIZE) &&
987 ((newskb = dev_alloc_skb(rxlen + 2))
991 /* size less than COPY_SIZE, allocate a rxlen SKB */
993 skb_reserve(skb, 2); /* 16byte align */
994 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
995 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1000 skb->protocol = eth_type_trans(skb, dev);
1002 dev->last_rx = jiffies;
1003 db->stats.rx_packets++;
1004 db->stats.rx_bytes += rxlen;
1007 /* Reuse SKB buffer when the packet is error */
1008 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1009 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1013 rxptr = rxptr->next_rx_desc;
1016 db->rx_ready_ptr = rxptr;
1021 * Get statistics from driver.
/* net_device->get_stats: return the stats block kept in board info. */
1024 static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
1026 struct dmfe_board_info *db = netdev_priv(dev);
1028 DMFE_DBUG(0, "dmfe_get_stats", 0);
1034 * Set DM910X multicast address
/*
 * net_device->set_multicast_list: program the Rx address filter.
 * Promiscuous mode sets CR6 PM|PBF; too many multicast entries (or
 * IFF_ALLMULTI) falls back to pass-all-multicast (CR6_PAM); otherwise
 * the per-chip setup frame / ID table is reprogrammed.
 */
1037 static void dmfe_set_filter_mode(struct DEVICE * dev)
1039 struct dmfe_board_info *db = netdev_priv(dev);
1040 unsigned long flags;
1042 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1043 spin_lock_irqsave(&db->lock, flags);
1045 if (dev->flags & IFF_PROMISC) {
1046 DMFE_DBUG(0, "Enable PROM Mode", 0);
1047 db->cr6_data |= CR6_PM | CR6_PBF;
1048 update_cr6(db->cr6_data, db->ioaddr);
1049 spin_unlock_irqrestore(&db->lock, flags);
1053 if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
1054 DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
1055 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1056 db->cr6_data |= CR6_PAM;
1057 spin_unlock_irqrestore(&db->lock, flags);
1061 DMFE_DBUG(0, "Set multicast address", dev->mc_count);
1062 if (db->chip_id == PCI_DM9132_ID)
1063 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
1065 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
1066 spin_unlock_irqrestore(&db->lock, flags);
/* ethtool get_drvinfo: report driver name/version and bus location.
 * NOTE(review): the sprintf "EISA" branch appears to be the non-PCI
 * arm of an #if/#else whose directives are missing from this listing. */
1069 static void netdev_get_drvinfo(struct net_device *dev,
1070 struct ethtool_drvinfo *info)
1072 struct dmfe_board_info *np = netdev_priv(dev);
1074 strcpy(info->driver, DRV_NAME);
1075 strcpy(info->version, DRV_VERSION);
1077 strcpy(info->bus_info, pci_name(np->pdev));
1079 sprintf(info->bus_info, "EISA 0x%lx %d",
1080 dev->base_addr, dev->irq);
/* ethtool operations table; only drvinfo is implemented. */
1083 static const struct ethtool_ops netdev_ethtool_ops = {
1084 .get_drvinfo = netdev_get_drvinfo,
1088 * A periodic timer routine
1089 * Dynamic media sense, allocate Rx buffer...
/*
 * Periodic (≈1 s, DMFE_TIMER_WUT) maintenance callback.
 * Responsibilities visible here:
 *  - one-time DM9102A-E3 workaround on the first invocation
 *    (toggle CR6 bit 18 and restart PHY autonegotiation);
 *  - leave the software-CRC "check mode" after enough good packets;
 *  - kick Tx polling / detect Tx timeout and schedule a dynamic reset;
 *  - perform the reset when wait_reset was flagged (e.g. by the ISR);
 *  - sample link status (CR12, or CR9 byte 3 on DM9132), handle
 *    link-down/link-up transitions and re-sense speed in AUTO mode;
 *  - run the HPNA remote-command countdown.
 * Re-arms itself before returning. Everything runs under db->lock.
 */
1092 static void dmfe_timer(unsigned long data)
1095 unsigned char tmp_cr12;
1096 struct DEVICE *dev = (struct DEVICE *) data;
1097 struct dmfe_board_info *db = netdev_priv(dev);
1098 unsigned long flags;
1100 DMFE_DBUG(0, "dmfe_timer()", 0);
1101 spin_lock_irqsave(&db->lock, flags);
1103 /* Media mode process when Link OK before enter this route */
1104 if (db->first_in_callback == 0) {
1105 db->first_in_callback = 1;
1106 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1107 db->cr6_data &= ~0x40000;
1108 update_cr6(db->cr6_data, db->ioaddr);
1109 phy_write(db->ioaddr,
1110 db->phy_addr, 0, 0x1000, db->chip_id);
1111 db->cr6_data |= 0x40000;
1112 update_cr6(db->cr6_data, db->ioaddr);
1113 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1114 add_timer(&db->timer);
1115 spin_unlock_irqrestore(&db->lock, flags);
1121 /* Operating Mode Check */
1122 if ( (db->dm910x_chk_mode & 0x1) &&
1123 (db->stats.rx_packets > MAX_CHECK_PACKET) )
1124 db->dm910x_chk_mode = 0x4;
1126 /* Dynamic reset DM910X : system error or transmit time-out */
1127 tmp_cr8 = inl(db->ioaddr + DCR8); /* missed-frame/overflow counter */
1128 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1132 db->interval_rx_cnt = 0;
1134 /* TX polling kick monitor */
1135 if ( db->tx_packet_cnt &&
1136 time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
1137 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
1140 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
1141 db->reset_TXtimeout++;
1143 printk(KERN_WARNING "%s: Tx timeout - resetting\n",
1148 if (db->wait_reset) {
1149 DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1151 dmfe_dynamic_reset(dev);
1152 db->first_in_callback = 0;
1153 db->timer.expires = DMFE_TIMER_WUT;
1154 add_timer(&db->timer);
1155 spin_unlock_irqrestore(&db->lock, flags);
1159 /* Link status check, Dynamic media type change */
1160 if (db->chip_id == PCI_DM9132_ID)
1161 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
1163 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
/* these two steppings cannot report link via CR12; the (elided)
 * code here derives link state another way before the checks below */
1165 if ( ((db->chip_id == PCI_DM9102_ID) &&
1166 (db->chip_revision == 0x02000030)) ||
1167 ((db->chip_id == PCI_DM9132_ID) &&
1168 (db->chip_revision == 0x02000010)) ) {
1171 tmp_cr12 = 0x0; /* Link failed */
1173 tmp_cr12 = 0x3; /* Link OK */
1176 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1178 DMFE_DBUG(0, "Link Failed", tmp_cr12);
1179 db->link_failed = 1;
1181 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1182 /* AUTO or force 1M Homerun/Longrun don't need */
1183 if ( !(db->media_mode & 0x38) )
1184 phy_write(db->ioaddr, db->phy_addr,
1185 0, 0x1000, db->chip_id);
1187 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1188 if (db->media_mode & DMFE_AUTO) {
1189 /* 10/100M link failed, used 1M Home-Net */
1190 db->cr6_data|=0x00040000; /* bit18=1, MII */
1191 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1192 update_cr6(db->cr6_data, db->ioaddr);
1195 if ((tmp_cr12 & 0x3) && db->link_failed) {
1196 DMFE_DBUG(0, "Link link OK", tmp_cr12);
1197 db->link_failed = 0;
1199 /* Auto Sense Speed */
1200 if ( (db->media_mode & DMFE_AUTO) &&
1201 dmfe_sense_speed(db) )
1202 db->link_failed = 1;
1203 dmfe_process_mode(db);
1204 /* SHOW_MEDIA_TYPE(db->op_mode); */
1207 /* HPNA remote command check */
1208 if (db->HPNA_command & 0xf00) {
1210 if (!db->HPNA_timer)
1211 dmfe_HPNA_remote_cmd_chk(db);
1214 /* Timer active again */
1215 db->timer.expires = DMFE_TIMER_WUT;
1216 add_timer(&db->timer);
1217 spin_unlock_irqrestore(&db->lock, flags);
 *	Dynamically reset the DM910X board
 *	Free Tx/Rx allocated memory
 *	Reset DM910X board
 *	Re-initialize DM910X board
/*
 * Dynamically reset the DM910X board: stop the MAC, drop all buffered
 * Rx packets, clear the driver's software counters and re-initialize
 * the chip.  Used for recovery after a Tx timeout or system error.
 */
static void dmfe_dynamic_reset(struct DEVICE *dev)
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, dev->base_addr);
	outl(0, dev->base_addr + DCR7);	/* Disable Interrupt */
	/* Write interrupt status back to itself to acknowledge it
	 * (presumably a write-1-to-clear register — TODO confirm) */
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free all Rx buffers still held in the ring */
	dmfe_free_rxbuffer(db);

	/* Reset driver software state; mark link down until re-sensed */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
1263 * free all allocated rx buffer
1266 static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1268 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1270 /* free allocated rx buffer */
1271 while (db->rx_avail_cnt) {
1272 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1273 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1280 * Reuse the SK buffer
/*
 * Re-arm the Rx descriptor at the insert pointer with a recycled @skb.
 * The slot is only reused if the hardware does not own it (rdes0 bit 31
 * clear); the skb data is DMA-mapped and ownership is handed back to
 * the chip.  If the descriptor is still chip-owned, only a debug
 * message is emitted (NOTE(review): the skb appears to be dropped
 * without being freed on that path in this view — confirm).
 */
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
	struct rx_desc *rxptr = db->rx_insert_ptr;

	/* Reuse only if the OWN bit (0x80000000) is not set */
	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		/* Map skb data for device DMA and store the bus address */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
			    skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		rxptr->rdes0 = cpu_to_le32(0x80000000);	/* give descriptor to chip */
		db->rx_insert_ptr = rxptr->next_rx_desc;
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1301 * Initialize transmit/Receive descriptor
1302 * Using Chain structure, and allocate Tx/Rx buffer
1305 static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
1307 struct tx_desc *tmp_tx;
1308 struct rx_desc *tmp_rx;
1309 unsigned char *tmp_buf;
1310 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1311 dma_addr_t tmp_buf_dma;
1314 DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1316 /* tx descriptor start pointer */
1317 db->tx_insert_ptr = db->first_tx_desc;
1318 db->tx_remove_ptr = db->first_tx_desc;
1319 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1321 /* rx descriptor start pointer */
1322 db->first_rx_desc = (void *)db->first_tx_desc +
1323 sizeof(struct tx_desc) * TX_DESC_CNT;
1325 db->first_rx_desc_dma = db->first_tx_desc_dma +
1326 sizeof(struct tx_desc) * TX_DESC_CNT;
1327 db->rx_insert_ptr = db->first_rx_desc;
1328 db->rx_ready_ptr = db->first_rx_desc;
1329 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1331 /* Init Transmit chain */
1332 tmp_buf = db->buf_pool_start;
1333 tmp_buf_dma = db->buf_pool_dma_start;
1334 tmp_tx_dma = db->first_tx_desc_dma;
1335 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1336 tmp_tx->tx_buf_ptr = tmp_buf;
1337 tmp_tx->tdes0 = cpu_to_le32(0);
1338 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1339 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1340 tmp_tx_dma += sizeof(struct tx_desc);
1341 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1342 tmp_tx->next_tx_desc = tmp_tx + 1;
1343 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1344 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1346 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1347 tmp_tx->next_tx_desc = db->first_tx_desc;
1349 /* Init Receive descriptor chain */
1350 tmp_rx_dma=db->first_rx_desc_dma;
1351 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1352 tmp_rx->rdes0 = cpu_to_le32(0);
1353 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1354 tmp_rx_dma += sizeof(struct rx_desc);
1355 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1356 tmp_rx->next_rx_desc = tmp_rx + 1;
1358 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1359 tmp_rx->next_rx_desc = db->first_rx_desc;
1361 /* pre-allocate Rx buffer */
1362 allocate_rx_buffer(db);
 *	First stop the DM910X, then write the new CR6 value, then restart.
1371 static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1375 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1376 outl(cr6_tmp, ioaddr + DCR6);
1378 outl(cr6_data, ioaddr + DCR6);
 *	Send a setup frame for the DM9132.
 *	This setup frame initializes the DM910X address filter mode.
/*
 * Program the DM9132's address-filter ID table (at I/O offset 0xc0):
 * write the station MAC address, then a 64-bit multicast hash table
 * with the broadcast bit always set.
 *
 * @dev:    network device whose dev_addr / mc_list are programmed
 * @mc_cnt: number of multicast entries to take from dev->mc_list
 */
static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
	struct dev_mc_list *mcptr;
	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
	u16 i, hash_table[4];

	DMFE_DBUG(0, "dm9132_id_table()", 0);

	/* Station address: three 16-bit words of dev_addr.
	 * NOTE(review): ioaddr is not advanced between these three writes
	 * in this view — the register-offset increments appear to be
	 * missing; confirm against the original source. */
	addrptr = (u16 *) dev->dev_addr;
	outw(addrptr[0], ioaddr);
	outw(addrptr[1], ioaddr);
	outw(addrptr[2], ioaddr);

	/* Clear Hash Table */
	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	/* broadcast address always passes the filter */
	hash_table[3] = 0x8000;

	/* Fold each multicast address into the 64-bit hash table:
	 * CRC of the 6-byte address selects one of 64 bits */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);

	/* Write the hash table to the MAC MD table registers */
	for (i = 0; i < 4; i++, ioaddr += 4)
		outw(hash_table[i], ioaddr);
 *	Send a setup frame for the DM9102/DM9102A.
 *	This setup frame initializes the DM910X address filter mode.
/*
 * Build and queue a "setup frame" on the Tx ring for DM9102/DM9102A
 * chips.  The setup frame carries the station address and the multicast
 * list, and is consumed by the chip to program its address filter.
 * If the Tx ring is idle the frame is kicked off immediately via a Tx
 * poll demand; otherwise it is left queued for later transmission.
 *
 * @dev:    network device supplying dev_addr / mc_list
 * @mc_cnt: number of multicast entries to copy from dev->mc_list
 */
static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
	struct dmfe_board_info *db = netdev_priv(dev);
	struct dev_mc_list *mcptr;
	struct tx_desc *txptr;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	/* Fill the next Tx buffer with the setup-frame payload */
	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Station address: three 16-bit words, each in its own 32-bit slot */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */

	/* Append each multicast address in the same 3-word layout */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		addrptr = (u16 *) mcptr->dmi_addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];

	/* Mark the descriptor as a setup frame (0x890000c0 in tdes1) and
	 * advance the insert pointer past it */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* If no packet is in flight, hand the descriptor to the chip and
	 * issue a Tx poll demand now; otherwise leave it queued */
	if (!db->tx_packet_cnt) {
		/* Resource Empty */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* chip owns descriptor */
		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, dev->base_addr);
		dev->trans_start = jiffies;
		db->tx_queue_cnt++;	/* Put in TX queue */
 *	Allocate Rx buffers:
 *	fill as many empty Rx descriptors with fresh buffers as possible.
/*
 * Pre-allocate sk_buffs for the Rx ring: starting at the insert pointer,
 * attach a freshly allocated, DMA-mapped buffer to each descriptor and
 * hand it to the chip, until the ring is full or allocation fails.
 */
static void allocate_rx_buffer(struct dmfe_board_info *db)
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while(db->rx_avail_cnt < RX_DESC_CNT) {
		/* Allocation failure ends the refill early
		 * (NOTE(review): the loop-exit statement after this test is
		 * not visible here — confirm against the original source) */
		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
		/* Map the buffer for device DMA and store the bus address */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
			    RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		rxptr->rdes0 = cpu_to_le32(0x80000000);	/* give descriptor to chip */
		rxptr = rxptr->next_rx_desc;

	db->rx_insert_ptr = rxptr;
1517 * Read one word data from the serial ROM
/*
 * Read one 16-bit word from the serial (93C46-style) EEPROM by
 * bit-banging the SROM interface through CR9.
 *
 * @ioaddr: I/O base address of the chip
 * @offset: word offset within the SROM to read
 *
 * Protocol: select the chip, clock out the read opcode (110b) and the
 * 6-bit word address MSB-first, then clock in 16 data bits MSB-first.
 */
static u16 read_srom_word(long ioaddr, int offset)
	long cr9_ioaddr = ioaddr + DCR9;

	/* Assert chip select */
	outl(CR9_SROM_READ, cr9_ioaddr);
	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Send the Read Command 110b */
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

	/* Send the 6-bit word offset, MSB first */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		SROM_CLK_WRITE(srom_data, cr9_ioaddr);

	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Clock in 16 data bits, MSB first, sampling CR9_CRDOUT */
	for (i = 16; i > 0; i--) {
		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
		srom_data = (srom_data << 1) |
				((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Deselect the chip
	 * (NOTE(review): the `return srom_data;` is not visible in this
	 * view — confirm against the original source) */
	outl(CR9_SROM_READ, cr9_ioaddr);
1557 * Auto sense the media mode
/*
 * Auto-sense the current media speed/duplex from the PHY and store the
 * result in db->op_mode (DMFE_10MHF/10MFD/100MHF/100MFD).
 * Returns nonzero on link failure, 0 on link OK (per callers; the
 * return statements are not visible in this view — confirm).
 */
static u8 dmfe_sense_speed(struct dmfe_board_info * db)
	/* CR6 bit18=0, select the internal 10/100M transceiver */
	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);

	/* Read PHY status register (reg 1) twice — the first read returns
	 * latched status, the second the current state */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	/* 0x24 = auto-negotiation complete + link up */
	if ( (phy_mode & 0x24) == 0x24 ) {
		/* Read the resolved speed/duplex from the chip-specific
		 * PHY status register */
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = phy_read(db->ioaddr,
					db->phy_addr, 7, db->chip_id) & 0xf000;
		else 				/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr,
					db->phy_addr, 17, db->chip_id) & 0xf000;
		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
		/* Decode one-hot speed/duplex bits into op_mode */
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;
	/* No link: fall back to 10M half duplex */
	db->op_mode = DMFE_10MHF;
	DMFE_DBUG(0, "Link Failed :", phy_mode);
1599 * Set 10/100 phyxcer capability
1600 * AUTO mode : phyxcer register4 is NIC capability
1601 * Force mode: phyxcer register4 is the force media
/*
 * Program the 10/100 PHY's capability advertisement (register 4) and
 * restart auto-negotiation.
 * AUTO mode : register 4 advertises the NIC's full capability set.
 * Force mode: register 4 advertises only the forced media type.
 */
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
	/* Select 10/100M phyxcer (CR6 bit18=0) */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, db->ioaddr);

	/* DM9009 Chip: clear Phyxcer reg18 bit12 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr,
				db->phy_addr, 18, db->chip_id) & ~0x1000;
		phy_write(db->ioaddr,
				db->phy_addr, 18, phy_reg, db->chip_id);

	/* Read reg 4 with the ability bits (0x01e0) masked off */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO: advertise everything the NIC supports */
		phy_reg |= db->PHY_reg4;
		/* Force mode: advertise only the selected media */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		/* DM9009 supports 10M only — mask 100M ability bits */
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;

	/* If nothing ended up advertised, fall back to full capability
	 * and AUTO mode */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart Auto-Negotiation (0x1800 = restart+isolate variant for
	 * DM9102 chip_type; 0x1200 = enable+restart otherwise) */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1655 * AUTO mode : PHY controller in Auto-negotiation Mode
1656 * Force mode: PHY controller in force mode with HUB
1657 * N-way force capability with SWITCH
/*
 * Apply the sensed/forced operating mode (db->op_mode) to the MAC and,
 * when forcing 10/100M against a partner without N-Way, to the PHY.
 * AUTO mode : PHY controller stays in auto-negotiation mode.
 * Force mode: PHY controller is forced to the selected speed/duplex.
 */
static void dmfe_process_mode(struct dmfe_board_info *db)
	/* Full Duplex Mode Check (op_mode bit 2 = full duplex)
	 * NOTE(review): the `else` between the next two assignments is not
	 * visible in this view — confirm against the original source */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection (same missing-else caveat as above) */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;	/* External MII select */
		db->cr6_data &= ~0x40000;	/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* Forced 10/100M mode: may need to force the PHY as well */
	if ( !(db->media_mode & 0x18)) {
		/* Read PHY reg 6 (AN expansion) to see if the link partner
		 * can auto-negotiate */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* partner without N-Way capability:
			 * force the PHY control register directly */
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			phy_write(db->ioaddr,
				db->phy_addr, 0, phy_reg, db->chip_id);
			/* DM9102 chip_type needs the write issued twice */
			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
			phy_write(db->ioaddr,
				db->phy_addr, 0, phy_reg, db->chip_id);
1703 * Write a word to Phy register
/*
 * Write a 16-bit word to a PHY register.
 * On the DM9132 the PHY registers are memory-mapped at I/O offset 0x80
 * (4 bytes apart); on DM9102/DM9102A the MII management frame is
 * bit-banged through CR9 (preamble, start, opcode, addresses, data).
 *
 * @iobase:   chip I/O base address
 * @phy_addr: 5-bit PHY address
 * @offset:   5-bit PHY register number
 * @phy_data: 16-bit value to write
 * @chip_id:  chip identifier selecting the access method
 */
static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
		      u16 phy_data, u32 chip_id)
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132: direct register write */
		ioaddr = iobase + 0x80 + offset * 4;
		outw(phy_data, ioaddr);
		/* DM9102/DM9102A Chip: bit-bang via CR9 */
		ioaddr = iobase + DCR9;

		/* Preamble: send 33+ synchronization clocks to the PHY */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) to the PHY */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command (01) to the PHY */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send 5-bit PHY address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send 5-bit register address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Turnaround bits (10) before the data field */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send the 16-bit data word, MSB first */
		for ( i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr,
				phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1754 * Read a word data from phy register
/*
 * Read a 16-bit word from a PHY register.
 * On the DM9132 the PHY registers are memory-mapped at I/O offset 0x80;
 * on DM9102/DM9102A the MII management frame is bit-banged through CR9.
 * Returns the register value (the `return phy_data;` is not visible in
 * this view — confirm against the original source).
 *
 * @iobase:   chip I/O base address
 * @phy_addr: 5-bit PHY address
 * @offset:   5-bit PHY register number
 * @chip_id:  chip identifier selecting the access method
 */
static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132: direct register read */
		ioaddr = iobase + 0x80 + offset * 4;
		phy_data = inw(ioaddr);
		/* DM9102/DM9102A Chip: bit-bang via CR9 */
		ioaddr = iobase + DCR9;

		/* Preamble: send 33+ synchronization clocks to the PHY */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) to the PHY */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command (10) to the PHY */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send 5-bit PHY address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send 5-bit register address, MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip the turnaround bit before the data field */
		phy_read_1bit(ioaddr);

		/* Clock in the 16-bit data word, MSB first */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data |= phy_read_1bit(ioaddr);
1808 * Write one bit data to Phy Controller
1811 static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1813 outl(phy_data, ioaddr); /* MII Clock Low */
1815 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1817 outl(phy_data, ioaddr); /* MII Clock Low */
1823 * Read one bit phy data from PHY controller
1826 static u16 phy_read_1bit(unsigned long ioaddr)
1830 outl(0x50000, ioaddr);
1832 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1833 outl(0x40000, ioaddr);
1841 * Parser SROM and media mode
/*
 * Parse the on-board SROM contents and the HPNA module parameters:
 * derive the NIC's media capabilities (PHY reg4 advertisement), any
 * forced media mode, special-function bits for CR15, the HPNA command
 * word, and detect an attached DM9801 (HomeRun) or DM9802 (LongRun)
 * companion device.
 */
static void dmfe_parse_srom(struct dmfe_board_info * db)
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version: the V4.1 layout carries media info */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* Get NIC supported media modes (16-bit word at offset 34) */
		db->NIC_capability = le16_to_cpup((__le16 *)srom + 34/2);

		/* Translate each capability bit into a PHY reg4
		 * advertisement bit (10MHF/10MFD/100MHF/100MFD) */
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;
			case 0x2: db->PHY_reg4 |= 0x0040; break;
			case 0x4: db->PHY_reg4 |= 0x0080; break;
			case 0x8: db->PHY_reg4 |= 0x0100; break;

		/* Forced media mode check: capability word ANDed with the
		 * force word at offset 36 */
		dmfe_mode = le32_to_cpup((__le32 *)srom + 34/4) &
				le32_to_cpup((__le32 *)srom + 36/4);
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */

		/* Special-function bits: set from the SF_mode module
		 * parameter or the corresponding SROM flag bytes */
		/* VLAN/long-packet support */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		/* Flow control */
		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;

	/* Build the HPNA command word from module parameters */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;

	/* Check whether a DM9801 or DM9802 companion chip is present:
	 * switch to the external MII and probe PHY ID registers */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data|0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
1939 * Init HomeRun DM9801
/*
 * Initialize an attached DM9801 HomeRun companion chip.
 * Programs noise-floor and related tuning values into PHY registers
 * 17 and 24/25, with per-revision (E3/E4/E5/E6) adjustments.
 *
 * @db:       board private data
 * @HPNA_rev: DM9801 revision code read from PHY ID register 3
 *
 * NOTE(review): the `switch(HPNA_rev)` header and default case are not
 * visible in this view — confirm against the original source.
 */
static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
	/* Commit the command word and tuned register values to the PHY */
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
1977 * Init HomeRun DM9802
1980 static void dmfe_program_DM9802(struct dmfe_board_info * db)
1984 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
1985 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
1986 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
1987 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
1988 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
1993 * Check remote HPNA power and speed status. If not correct,
1994 * issue command again.
/*
 * Check the remote HPNA device's power/speed status (PHY register 17
 * bits 5-6) against our requested setting; if they disagree, re-issue
 * the HPNA command word, otherwise back off to a 10-minute recheck.
 *
 * NOTE(review): the `switch(phy_reg)` header is not visible in this
 * view — confirm against the original source.
 */
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
	/* Read remote device status and map it to the expected
	 * command-word nibble (LP/HP = low/high power, LS/HS = speed) */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
	case 0x60: phy_reg = 0x0500;break; /* HP/HS */

	/* Check whether the remote device status matches our setting */
	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
		/* Mismatch: re-issue the command */
		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
			  db->chip_id);
	db->HPNA_timer=600;	/* Match: recheck every 10 minutes */
/*
 * PCI device ID table: the Davicom (vendor 0x1282) parts handled by
 * this driver.  driver_data carries the internal PCI_DM9xxx_ID chip
 * identifier used throughout the driver.
 * NOTE(review): the terminating all-zero sentinel entry and the closing
 * brace are not visible in this view — confirm against the original.
 */
static struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
/* Export the ID table for module autoloading */
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
/*
 * PCI driver registration structure.
 * NOTE(review): the `.name` initializer is not visible in this view —
 * confirm against the original source.
 */
static struct pci_driver dmfe_driver = {
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= __devexit_p(dmfe_remove_one),
/* Module identification */
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module load-time parameters (permission 0 = not visible in sysfs) */
module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		"(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
 *	When the user loads the module with insmod, the system invokes
 *	init_module() to initialize and register the driver.
/*
 * Module entry point: copy module parameters into driver globals,
 * clamp the HPNA parameters to their valid ranges, and register the
 * PCI driver with the kernel.
 *
 * NOTE(review): the `switch(mode)` header validating the `mode`
 * parameter is not visible in this view — confirm against the original.
 */
static int __init dmfe_init_module(void)
	printed_version = 1;

	DMFE_DBUG(0, "init_module() ", debug);

	dmfe_debug = debug;	/* set debug flag */
	dmfe_cr6_user_set = cr6set;	/* user-supplied CR6 override */

	dmfe_media_mode = mode;
	default:dmfe_media_mode = DMFE_AUTO;	/* unknown mode: auto-sense */

	/* Clamp HPNA parameters to their valid ranges */
	HPNA_mode = 0;	/* Default: LP/HS */
	if (HPNA_rx_cmd > 1)
		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
	if (HPNA_tx_cmd > 1)
		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
	if (HPNA_NoiseFloor > 15)
		HPNA_NoiseFloor = 0;

	rc = pci_register_driver(&dmfe_driver);
 *	When the user removes the module with rmmod, the system invokes
 *	the cleanup function to unregister all registered services.
2113 static void __exit dmfe_cleanup_module(void)
2115 DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2116 pci_unregister_driver(&dmfe_driver);
/* Register the module entry and exit points with the kernel */
module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);