/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *
 *  The driver is initialized through of_device.  Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit.  This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
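/*
 * For orientation, a buffer descriptor is a small structure along the
 * lines of the sketch below.  The real definitions live in gianfar.h;
 * this only illustrates the fields as this file uses them:
 *
 *	struct txbd8 {
 *		u16	status;		TXBD_READY, TXBD_WRAP, etc.
 *		u16	length;		frame length in bytes
 *		u32	bufPtr;		physical address of the data buffer
 *	};
 *
 * The rxbd8 layout is analogous, with RXBD_* status bits.
 */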
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}
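/* A Frame Control Block (FCB) is a GMAC_FCB_LEN-byte block that the driver
 * prepends to outgoing frames (gfar_add_fcb()) so the controller can insert
 * checksums and VLAN tags, and that the controller prepends to received
 * frames (stripped again in gfar_get_fcb()) to report the same information. */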
static int gfar_of_init(struct net_device *dev)
{
	struct device_node *phy, *mdio;
	const unsigned int *id;
	const char *model;
	const char *ctype;
	const void *mac_addr;
	const phandle *ph;
	u64 addr, size;
	int err = 0;
	struct gfar_private *priv = netdev_priv(dev);
	struct device_node *np = priv->node;
	char bus_name[MII_BUS_ID_SIZE];

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* get a pointer to the register memory */
	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
	priv->regs = ioremap(addr, size);

	if (priv->regs == NULL)
		return -ENOMEM;

	priv->interruptTransmit = irq_of_parse_and_map(np, 0);

	model = of_get_property(np, "model", NULL);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->interruptReceive = irq_of_parse_and_map(np, 1);

		priv->interruptError = irq_of_parse_and_map(np, 2);

		if (priv->interruptTransmit < 0 ||
				priv->interruptReceive < 0 ||
				priv->interruptError < 0) {
			err = -EINVAL;
			goto err_out;
		}
	}

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	ph = of_get_property(np, "phy-handle", NULL);
	if (ph == NULL) {
		u32 *fixed_link;

		fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
		if (!fixed_link) {
			err = -ENODEV;
			goto err_out;
		}

		snprintf(priv->phy_bus_id, BUS_ID_SIZE, PHY_ID_FMT, "0",
				fixed_link[0]);
	} else {
		phy = of_find_node_by_phandle(*ph);
		if (phy == NULL) {
			err = -ENODEV;
			goto err_out;
		}

		mdio = of_get_parent(phy);

		id = of_get_property(phy, "reg", NULL);

		of_node_put(phy);
		of_node_put(mdio);

		gfar_mdio_bus_name(bus_name, mdio);
		snprintf(priv->phy_bus_id, BUS_ID_SIZE, "%s:%02x",
				bus_name, *id);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	ph = of_get_property(np, "tbi-handle", NULL);
	if (ph) {
		struct device_node *tbi = of_find_node_by_phandle(*ph);
		struct of_device *ofdev;
		struct mii_bus *bus;

		if (!tbi)
			return 0;

		mdio = of_get_parent(tbi);
		if (!mdio)
			return 0;

		ofdev = of_find_device_by_node(mdio);

		of_node_put(mdio);

		id = of_get_property(tbi, "reg", NULL);
		if (!id)
			return 0;

		of_node_put(tbi);

		bus = dev_get_drvdata(&ofdev->dev);

		priv->tbiphy = bus->phy_map[*id];
	}

	return 0;

err_out:
	iounmap(priv->regs);
	return err;
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0;

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->node = ofdev->node;

	err = gfar_of_init(dev);
	if (err)
		goto regs_fail;

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);
	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);

	/* Stop the DMA engine now, in case it was running before
	 * (the firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = gfar_netpoll;
#endif
	dev->stop = gfar_close;
	dev->change_mtu = gfar_change_mtu;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txic = DEFAULT_TXIC;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxic = DEFAULT_RXIC;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
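	/* ((NETIF_MSG_IFUP << 1) - 1) sets every message-class bit up to and
	 * including NETIF_MSG_IFUP. */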

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	iounmap(priv->regs);
	free_netdev(priv->dev);

	return 0;
}

#ifdef CONFIG_PM
static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
	struct net_device *dev = priv->dev;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(dev);

	if (netif_running(dev)) {
		spin_lock_irqsave(&priv->txlock, flags);
		spin_lock(&priv->rxlock);

		gfar_halt_nodisable(dev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&priv->regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&priv->regs->maccfg1, tempval);

		spin_unlock(&priv->rxlock);
		spin_unlock_irqrestore(&priv->txlock, flags);

		napi_disable(&priv->napi);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&priv->regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&priv->regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&priv->regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
	struct net_device *dev = priv->dev;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	tempval = gfar_read(&priv->regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&priv->regs->maccfg2, tempval);

	gfar_start(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	netif_device_attach(dev);

	napi_enable(&priv->napi);

	return 0;
}
#else
#define gfar_suspend NULL
#define gfar_resume NULL
#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, priv->phy_bus_id, &adjust_link, 0, interface);

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!priv->tbiphy) {
		printk(KERN_WARNING "SGMII mode requires that the device "
				"tree specify a tbi-handle\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(priv->tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(priv->tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(priv->tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(priv->tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {
		if (priv->tx_skbuff[i]) {
			dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}

		txbdp++;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);

	dev->trans_start = jiffies;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr = 0;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
					dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
					dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
					dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb;

		skb = gfar_new_skb(dev);

		if (!skb) {
			printk(KERN_ERR "%s: Can't allocate RX buffers\n",
					dev->name);

			goto err_rxalloc_fail;
		}

		priv->rx_skbuff[i] = skb;

		gfar_new_rxbdp(dev, rxbdp, skb);

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;
			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
						dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	gfar_write(&regs->txic, 0);
	if (priv->txcoalescing)
		gfar_write(&regs->txic, priv->txic);

	gfar_write(&regs->rxic, 0);
	if (priv->rxcoalescing)
		gfar_write(&regs->rxic, priv->rxic);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);
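	/* ATTR_BDSTASH asks the controller to stash buffer descriptors it
	 * writes back (typically into the CPU cache); ATTR_BUFSTASH does the
	 * same for the start of each received frame, using the extraction
	 * length and index programmed via ATTRELI above. */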

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	napi_enable(&priv->napi);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);
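	/* For illustration: on an untagged IPv4 frame with no IP options,
	 * the IP header starts ETH_HLEN (14) bytes into the frame, so l3os
	 * works out to 14 and l4os to 20. */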
	fcb->flags = flags;
}

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	struct vlan_group *old_grp;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	old_grp = priv->vlgrp;
	if (old_grp == grp)
		goto unlock;

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

unlock:
	spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;
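	/* This rounds the buffer up to the next INCREMENTAL_BUFFER_SIZE
	 * boundary: e.g. with 512-byte increments a 1518-byte frame yields
	 * a 1536-byte buffer, and an exact multiple still gains one full
	 * extra increment. */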

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->dev;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct net_device *dev)
{
	struct txbd8 *bdp;
	struct gfar_private *priv = netdev_priv(dev);
	int howmany = 0;

	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the
		 * ring is empty or full now (it could only be full in the
		 * beginning, obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		howmany++;

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			dev->stats.collisions++;

		/* Unmap the DMA memory */
		dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
				bdp->length, DMA_TO_DEVICE);

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* Clean BD length for empty detection */
		bdp->length = 0;

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	dev->stats.tx_packets += howmany;

	return howmany;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);

	gfar_clean_tx_ring(dev);

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (likely(priv->txcoalescing)) {
		gfar_write(&priv->regs->txic, 0);
		gfar_write(&priv->regs->txic, priv->txic);
	}

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 *status_len = (u32 *)bdp;
	u16 flags;

	bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	flags = RXBD_EMPTY | RXBD_INTERRUPT;

	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
		flags |= RXBD_WRAP;

	eieio();

	*status_len = (u32)flags << 16;
}

struct sk_buff * gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	/* We have to allocate the skb, so keep trying till we succeed */
	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);
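	/* Note: the skb was over-allocated by RXBUF_ALIGNMENT bytes above,
	 * so this reserve always fits; if skb->data was already aligned,
	 * alignamount is a full RXBUF_ALIGNMENT rather than zero, which is
	 * harmless given the extra headroom. */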

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	u32 tempval;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RTX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev, &priv->napi);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
				dev->name, gfar_read(&priv->regs->ievent),
				gfar_read(&priv->regs->imask));
	}

	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
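/* Note the equality test against (RXFCB_CIP | RXFCB_CTU) above: any other
 * bit inside RXFCB_CSUM_MASK (e.g. a checksum-error flag) makes the
 * comparison fail, so such frames fall back to CHECKSUM_NONE and are
 * re-verified in software. */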
static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
		dev->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
			ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						       fcb->vlctl);
		} else
			ret = netif_receive_skb(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = priv->rx_skbuff[priv->skb_currx];

		dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				 bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb_any(skb);
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			dev->stats.rx_bytes += pkt_len;
		}

		priv->rx_skbuff[priv->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(dev, bdp, newskb);

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
			(priv->skb_currx + 1) &
			RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->dev;
	int howmany;
	unsigned long flags;

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	howmany = gfar_clean_rx_ring(dev, budget);

	if (howmany < budget) {
		netif_rx_complete(dev, napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(priv->rxcoalescing)) {
			gfar_write(&priv->regs->rxic, 0);
			gfar_write(&priv->regs->rxic, priv->rxic);
		}
	}

	return howmany;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack!  Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the other 5 bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
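/*
 * Worked example: with hash_width == 8, whichreg below comes out as the
 * top three bits of the CRC (selecting one of gaddr0-7) and whichbit as
 * the next five bits (selecting a bit within that register, MSB first).
 */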
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers.
 * This function sets the numth pair to a given address.
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, because
	 * little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
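	/* For example, the address 00:01:02:03:04:05 is stored in tmpbuf
	 * as 05:04:03:02:01:00 before being written out below. */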
	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:fsl-gianfar");

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{},
};

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_suspend,
	.resume = gfar_resume,
};

static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = of_register_platform_driver(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
	gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);