/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "gmii.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "falcon.h"
#include "mac.h"
#define EFX_MAX_MTU (9 * 1024)
/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
 * workqueue, there is nothing to be gained in making it per NIC
 */
static struct workqueue_struct *refill_workqueue;
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");
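
/* Usage sketch (assuming the module builds as sfc.ko): "modprobe sfc lro=0"
 * disables LRO by default for new devices; the 0644 permissions above also
 * make the parameter writable at runtime via
 * /sys/module/sfc/parameters/lro. */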
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us to
 * apply a higher level of interrupt moderation to TX events.
 *
 * This is forced to 0 for MSI interrupt mode as the interrupt vector
 * is shared between TX and RX events in that mode.
 */
static unsigned int separate_tx_and_rx_channels = true;
/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the hardware monitor will trigger a
 * reset when it detects an error condition.
 */
static unsigned int monitor_reset = true;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
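
/* Working the arithmetic above: half of 1024 descriptors is 512; at a worst
 * case of 3 descriptors per packet that is ~170 packets, and at ~1.2 usec
 * per full-size frame on a 10G link the queue drains in ~205 usec. The
 * 150 usec default is comfortably below that bound, so a completion
 * interrupt arrives before a stopped queue can run dry. */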
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
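
/* Example (hypothetical values): "modprobe sfc rss_cpus=2" caps RSS at two
 * interrupts/RX queues regardless of how many CPU packages are online. */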
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx)                \
        do {                                            \
                if (efx->state == STATE_RUNNING)        \
                        ASSERT_RTNL();                  \
        } while (0)
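
/* In other words: once the NIC has reached STATE_RUNNING, every path that
 * must be serialised against reset is expected to hold the RTNL lock;
 * during probe and remove (STATE_INIT/STATE_FINI) no lock is required. */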
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
        struct efx_nic *efx = channel->efx;
        int rx_packets;

        if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
                     !channel->enabled))
                return 0;

        rx_packets = falcon_process_eventq(channel, rx_quota);
        if (rx_packets == 0)
                return 0;

        /* Deliver last RX packet. */
        if (channel->rx_pkt) {
                __efx_rx_packet(channel, channel->rx_pkt,
                                channel->rx_pkt_csummed);
                channel->rx_pkt = NULL;
        }

        efx_flush_lro(channel);
        efx_rx_strategy(channel);

        efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

        return rx_packets;
}
/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
        /* The interrupt handler for this channel may set work_pending
         * as soon as we acknowledge the events we've seen.  Make sure
         * it's cleared before then. */
        channel->work_pending = false;
        smp_wmb();

        falcon_eventq_read_ack(channel);
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
        struct efx_channel *channel =
                container_of(napi, struct efx_channel, napi_str);
        struct net_device *napi_dev = channel->napi_dev;
        int rx_packets;

        EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
                  channel->channel, raw_smp_processor_id());

        rx_packets = efx_process_channel(channel, budget);

        if (rx_packets < budget) {
                /* There is no race here; although napi_disable() will
                 * only wait for netif_rx_complete(), this isn't a problem
                 * since efx_channel_processed() will have no effect if
                 * interrupts have already been disabled.
                 */
                netif_rx_complete(napi_dev, napi);
                efx_channel_processed(channel);
        }

        return rx_packets;
}
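
/* Note on the contract above: returning a value smaller than @budget tells
 * the NAPI core that polling is complete, so no further polls will occur
 * until the hardware raises another interrupt; acknowledging the event
 * queue (efx_channel_processed) is what allows that next interrupt to be
 * generated. */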
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;

        BUG_ON(!channel->used_flags);
        BUG_ON(!channel->enabled);

        /* Disable interrupts and wait for ISRs to complete */
        falcon_disable_interrupts(efx);
        if (efx->legacy_irq)
                synchronize_irq(efx->legacy_irq);
        if (channel->irq)
                synchronize_irq(channel->irq);

        /* Wait for any NAPI processing to complete */
        napi_disable(&channel->napi_str);

        /* Poll the channel */
        efx_process_channel(channel, efx->type->evq_size);

        /* Ack the eventq. This may cause an interrupt to be generated
         * when they are reenabled */
        efx_channel_processed(channel);

        napi_enable(&channel->napi_str);
        falcon_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
        EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

        return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
        EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

        channel->eventq_read_ptr = 0;

        falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
        EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

        falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
        EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

        falcon_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/
static int efx_probe_channel(struct efx_channel *channel)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        int rc;

        EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

        rc = efx_probe_eventq(channel);
        if (rc)
                goto fail1;

        efx_for_each_channel_tx_queue(tx_queue, channel) {
                rc = efx_probe_tx_queue(tx_queue);
                if (rc)
                        goto fail2;
        }

        efx_for_each_channel_rx_queue(rx_queue, channel) {
                rc = efx_probe_rx_queue(rx_queue);
                if (rc)
                        goto fail3;
        }

        channel->n_rx_frm_trunc = 0;

        return 0;

 fail3:
        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_remove_rx_queue(rx_queue);
 fail2:
        efx_for_each_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
 fail1:
        return rc;
}
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;

        /* Calculate the rx buffer allocation parameters required to
         * support the current MTU, including padding for header
         * alignment and overruns.
         */
        efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
                              EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
                              efx->type->rx_buffer_padding);
        efx->rx_buffer_order = get_order(efx->rx_buffer_len);

        /* Initialise the channels */
        efx_for_each_channel(channel, efx) {
                EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

                efx_init_eventq(channel);

                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_init_tx_queue(tx_queue);

                /* The rx buffer allocation strategy is MTU dependent */
                efx_rx_strategy(channel);

                efx_for_each_channel_rx_queue(rx_queue, channel)
                        efx_init_rx_queue(rx_queue);

                WARN_ON(channel->rx_pkt != NULL);
                efx_rx_strategy(channel);
        }
}
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
        struct efx_rx_queue *rx_queue;

        EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

        if (!(channel->efx->net_dev->flags & IFF_UP))
                netif_napi_add(channel->napi_dev, &channel->napi_str,
                               efx_poll, napi_weight);

        /* The interrupt handler for this channel may set work_pending
         * as soon as we enable it.  Make sure it's cleared before
         * then.  Similarly, make sure it sees the enabled flag set. */
        channel->work_pending = false;
        channel->enabled = true;
        smp_wmb();

        napi_enable(&channel->napi_str);

        /* Load up RX descriptors */
        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_fast_push_rx_descriptors(rx_queue);
}
/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
        struct efx_rx_queue *rx_queue;

        if (!channel->enabled)
                return;

        EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

        channel->enabled = false;
        napi_disable(&channel->napi_str);

        /* Ensure that any worker threads have exited or will be no-ops */
        efx_for_each_channel_rx_queue(rx_queue, channel) {
                spin_lock_bh(&rx_queue->add_lock);
                spin_unlock_bh(&rx_queue->add_lock);
        }
}
static void efx_fini_channels(struct efx_nic *efx)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        EFX_ASSERT_RESET_SERIALISED(efx);
        BUG_ON(efx->port_enabled);

        efx_for_each_channel(channel, efx) {
                EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

                efx_for_each_channel_rx_queue(rx_queue, channel)
                        efx_fini_rx_queue(rx_queue);
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_fini_tx_queue(tx_queue);
        }

        /* Do the event queues last so that we can handle flush events
         * for all DMA queues. */
        efx_for_each_channel(channel, efx) {
                EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);

                efx_fini_eventq(channel);
        }
}
static void efx_remove_channel(struct efx_channel *channel)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_remove_rx_queue(rx_queue);
        efx_for_each_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);

        channel->used_flags = 0;
}
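
/* efx_schedule_slow_fill() below queues a refill retry on the slow-fill
 * workqueue; @delay is in jiffies, as it is passed straight through to
 * queue_delayed_work(). */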
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
        queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/
/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
        /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
         * that no events are triggered between unregister_netdev() and the
         * driver unloading. A more general condition is that NETDEV_CHANGE
         * can only be generated between NETDEV_UP and NETDEV_DOWN */
        if (!netif_running(efx->net_dev))
                return;

        if (efx->port_inhibited) {
                netif_carrier_off(efx->net_dev);
                return;
        }

        if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
                efx->n_link_state_changes++;

                if (efx->link_up)
                        netif_carrier_on(efx->net_dev);
                else
                        netif_carrier_off(efx->net_dev);
        }

        /* Status message for kernel log */
        if (efx->link_up) {
                struct mii_if_info *gmii = &efx->mii;
                unsigned adv, lpa;

                /* NONE here means direct XAUI from the controller, with no
                 * MDIO-attached device we can query. */
                if (efx->phy_type != PHY_TYPE_NONE) {
                        adv = gmii_advertised(gmii);
                        lpa = gmii_lpa(gmii);
                } else {
                        lpa = GM_LPA_10000 | LPA_DUPLEX;
                        adv = lpa;
                }
                EFX_INFO(efx, "link up at %dMbps %s-duplex "
                         "(adv %04x lpa %04x) (MTU %d)%s\n",
                         (efx->link_options & GM_LPA_10000 ? 10000 :
                          (efx->link_options & GM_LPA_1000 ? 1000 :
                           (efx->link_options & GM_LPA_100 ? 100 :
                            10))),
                         (efx->link_options & GM_LPA_DUPLEX ?
                          "full" : "half"),
                         adv, lpa,
                         efx->net_dev->mtu,
                         (efx->promiscuous ? " [PROMISC]" : ""));
        } else {
                EFX_INFO(efx, "link down\n");
        }
}
/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
        WARN_ON(!mutex_is_locked(&efx->mac_lock));

        EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
                raw_smp_processor_id());

        falcon_reconfigure_xmac(efx);

        /* Inform kernel of loss/gain of carrier */
        efx_link_status_changed(efx);
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
        EFX_ASSERT_RESET_SERIALISED(efx);

        mutex_lock(&efx->mac_lock);
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
}
/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic,
                                           reconfigure_work);

        mutex_lock(&efx->mac_lock);
        if (efx->port_enabled)
                __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
}
static int efx_probe_port(struct efx_nic *efx)
{
        int rc;

        EFX_LOG(efx, "create port\n");

        /* Connect up MAC/PHY operations table and read MAC address */
        rc = falcon_probe_port(efx);
        if (rc)
                goto err;

        /* Sanity check MAC address */
        if (is_valid_ether_addr(efx->mac_address)) {
                memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
        } else {
                DECLARE_MAC_BUF(mac);

                EFX_ERR(efx, "invalid MAC address %s\n",
                        print_mac(mac, efx->mac_address));
                if (!allow_bad_hwaddr) {
                        rc = -EINVAL;
                        goto err;
                }
                random_ether_addr(efx->net_dev->dev_addr);
                EFX_INFO(efx, "using locally-generated MAC %s\n",
                         print_mac(mac, efx->net_dev->dev_addr));
        }

        return 0;

 err:
        efx_remove_port(efx);
        return rc;
}
static int efx_init_port(struct efx_nic *efx)
{
        int rc;

        EFX_LOG(efx, "init port\n");

        /* Initialise the MAC and PHY */
        rc = falcon_init_xmac(efx);
        if (rc)
                return rc;

        efx->port_initialized = true;
        efx->stats_enabled = true;

        /* Reconfigure port to program MAC registers */
        falcon_reconfigure_xmac(efx);

        return 0;
}
/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
        EFX_LOG(efx, "start port\n");
        BUG_ON(efx->port_enabled);

        mutex_lock(&efx->mac_lock);
        efx->port_enabled = true;
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
}
/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
        EFX_LOG(efx, "stop port\n");

        mutex_lock(&efx->mac_lock);
        efx->port_enabled = false;
        mutex_unlock(&efx->mac_lock);

        /* Serialise against efx_set_multicast_list() */
        if (efx_dev_registered(efx)) {
                netif_addr_lock_bh(efx->net_dev);
                netif_addr_unlock_bh(efx->net_dev);
        }
}
static void efx_fini_port(struct efx_nic *efx)
{
        EFX_LOG(efx, "shut down port\n");

        if (!efx->port_initialized)
                return;

        falcon_fini_xmac(efx);
        efx->port_initialized = false;

        efx->link_up = false;
        efx_link_status_changed(efx);
}
static void efx_remove_port(struct efx_nic *efx)
{
        EFX_LOG(efx, "destroying port\n");

        falcon_remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
        struct pci_dev *pci_dev = efx->pci_dev;
        dma_addr_t dma_mask = efx->type->max_dma_mask;
        int rc;

        EFX_LOG(efx, "initialising I/O\n");

        rc = pci_enable_device(pci_dev);
        if (rc) {
                EFX_ERR(efx, "failed to enable PCI device\n");
                goto fail1;
        }

        pci_set_master(pci_dev);

        /* Set the PCI DMA mask.  Try all possibilities from our
         * genuine mask down to 32 bits, because some architectures
         * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
         * masks even though they reject 46 bit masks.
         */
        while (dma_mask > 0x7fffffffUL) {
                if (pci_dma_supported(pci_dev, dma_mask) &&
                    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
                        break;
                dma_mask >>= 1;
        }
        if (rc) {
                EFX_ERR(efx, "could not find a suitable DMA mask\n");
                goto fail2;
        }
        EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
        rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
        if (rc) {
                /* pci_set_consistent_dma_mask() is not *allowed* to
                 * fail with a mask that pci_set_dma_mask() accepted,
                 * but just in case...
                 */
                EFX_ERR(efx, "failed to set consistent DMA mask\n");
                goto fail2;
        }

        efx->membase_phys = pci_resource_start(efx->pci_dev,
                                               efx->type->mem_bar);
        rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
        if (rc) {
                EFX_ERR(efx, "request for memory BAR failed\n");
                rc = -EIO;
                goto fail3;
        }
        efx->membase = ioremap_nocache(efx->membase_phys,
                                       efx->type->mem_map_size);
        if (!efx->membase) {
                EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
                        efx->type->mem_bar,
                        (unsigned long long)efx->membase_phys,
                        efx->type->mem_map_size);
                rc = -ENOMEM;
                goto fail4;
        }
        EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
                efx->type->mem_bar, (unsigned long long)efx->membase_phys,
                efx->type->mem_map_size, efx->membase);

        return 0;

 fail4:
        release_mem_region(efx->membase_phys, efx->type->mem_map_size);
 fail3:
        efx->membase_phys = 0;
 fail2:
        pci_disable_device(efx->pci_dev);
 fail1:
        return rc;
}
static void efx_fini_io(struct efx_nic *efx)
{
        EFX_LOG(efx, "shutting down I/O\n");

        if (efx->membase) {
                iounmap(efx->membase);
                efx->membase = NULL;
        }

        if (efx->membase_phys) {
                pci_release_region(efx->pci_dev, efx->type->mem_bar);
                efx->membase_phys = 0;
        }

        pci_disable_device(efx->pci_dev);
}
/* Get number of RX queues wanted.  Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
        cpumask_t core_mask;
        int count;
        int cpu;

        cpus_clear(core_mask);
        count = 0;
        for_each_online_cpu(cpu) {
                if (!cpu_isset(cpu, core_mask)) {
                        ++count;
                        cpus_or(core_mask, core_mask,
                                topology_core_siblings(cpu));
                }
        }

        return count;
}
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
        int max_channels =
                min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
        int rc, i;

        if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
                struct msix_entry xentries[EFX_MAX_CHANNELS];
                int wanted_ints;

                /* We want one RX queue and interrupt per CPU package
                 * (or as specified by the rss_cpus module parameter).
                 * We will need one channel per interrupt.
                 */
                wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
                efx->n_rx_queues = min(wanted_ints, max_channels);

                for (i = 0; i < efx->n_rx_queues; i++)
                        xentries[i].entry = i;
                rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
                if (rc > 0) {
                        EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
                        efx->n_rx_queues = rc;
                        rc = pci_enable_msix(efx->pci_dev, xentries,
                                             efx->n_rx_queues);
                }

                if (rc == 0) {
                        for (i = 0; i < efx->n_rx_queues; i++)
                                efx->channel[i].irq = xentries[i].vector;
                } else {
                        /* Fall back to single channel MSI */
                        efx->interrupt_mode = EFX_INT_MODE_MSI;
                        EFX_ERR(efx, "could not enable MSI-X\n");
                }
        }

        /* Try single interrupt MSI */
        if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
                efx->n_rx_queues = 1;
                rc = pci_enable_msi(efx->pci_dev);
                if (rc == 0) {
                        efx->channel[0].irq = efx->pci_dev->irq;
                } else {
                        EFX_ERR(efx, "could not enable MSI\n");
                        efx->interrupt_mode = EFX_INT_MODE_LEGACY;
                }
        }

        /* Assume legacy interrupts */
        if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
                efx->n_rx_queues = 1;
                efx->legacy_irq = efx->pci_dev->irq;
        }
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
        struct efx_channel *channel;

        /* Remove MSI/MSI-X interrupts */
        efx_for_each_channel(channel, efx)
                channel->irq = 0;
        pci_disable_msi(efx->pci_dev);
        pci_disable_msix(efx->pci_dev);

        /* Remove legacy interrupt */
        efx->legacy_irq = 0;
}
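
/* Mapping policy implemented below: RX queue N is always handled by channel
 * N, while TX completions land on channel 1 when separate_tx_and_rx_channels
 * applies (and the interrupt mode allows it), otherwise on channel 0. */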
static void efx_set_channels(struct efx_nic *efx)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        efx_for_each_tx_queue(tx_queue, efx) {
                if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
                        tx_queue->channel = &efx->channel[1];
                else
                        tx_queue->channel = &efx->channel[0];
                tx_queue->channel->used_flags |= EFX_USED_BY_TX;
        }

        efx_for_each_rx_queue(rx_queue, efx) {
                rx_queue->channel = &efx->channel[rx_queue->queue];
                rx_queue->channel->used_flags |= EFX_USED_BY_RX;
        }
}
static int efx_probe_nic(struct efx_nic *efx)
{
        int rc;

        EFX_LOG(efx, "creating NIC\n");

        /* Carry out hardware-type specific initialisation */
        rc = falcon_probe_nic(efx);
        if (rc)
                return rc;

        /* Determine the number of channels and RX queues by trying to hook
         * in MSI-X interrupts. */
        efx_probe_interrupts(efx);

        efx_set_channels(efx);

        /* Initialise the interrupt moderation settings */
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

        return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
        EFX_LOG(efx, "destroying NIC\n");

        efx_remove_interrupts(efx);
        falcon_remove_nic(efx);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/
static int efx_probe_all(struct efx_nic *efx)
{
        struct efx_channel *channel;
        int rc;

        /* Create NIC */
        rc = efx_probe_nic(efx);
        if (rc) {
                EFX_ERR(efx, "failed to create NIC\n");
                goto fail1;
        }

        /* Create port */
        rc = efx_probe_port(efx);
        if (rc) {
                EFX_ERR(efx, "failed to create port\n");
                goto fail2;
        }

        /* Create channels */
        efx_for_each_channel(channel, efx) {
                rc = efx_probe_channel(channel);
                if (rc) {
                        EFX_ERR(efx, "failed to create channel %d\n",
                                channel->channel);
                        goto fail3;
                }
        }

        return 0;

 fail3:
        efx_for_each_channel(channel, efx)
                efx_remove_channel(channel);
        efx_remove_port(efx);
 fail2:
        efx_remove_nic(efx);
 fail1:
        return rc;
}
/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
        struct efx_channel *channel;

        EFX_ASSERT_RESET_SERIALISED(efx);

        /* Check that it is appropriate to restart the interface. All
         * of these flags are safe to read under just the rtnl lock */
        if (efx->port_enabled)
                return;
        if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
                return;
        if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
                return;

        /* Mark the port as enabled so port reconfigurations can start, then
         * restart the transmit interface early so the watchdog timer stops */
        efx_start_port(efx);
        if (efx_dev_registered(efx))
                efx_wake_queue(efx);

        efx_for_each_channel(channel, efx)
                efx_start_channel(channel);

        falcon_enable_interrupts(efx);

        /* Start hardware monitor if we're in RUNNING */
        if (efx->state == STATE_RUNNING)
                queue_delayed_work(efx->workqueue, &efx->monitor_work,
                                   efx_monitor_interval);
}
/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
        struct efx_rx_queue *rx_queue;

        /* Make sure the hardware monitor is stopped */
        cancel_delayed_work_sync(&efx->monitor_work);

        /* Ensure that all RX slow refills are complete. */
        efx_for_each_rx_queue(rx_queue, efx)
                cancel_delayed_work_sync(&rx_queue->work);

        /* Stop scheduled port reconfigurations */
        cancel_work_sync(&efx->reconfigure_work);
}
/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
        struct efx_channel *channel;

        EFX_ASSERT_RESET_SERIALISED(efx);

        /* port_enabled can be read safely under the rtnl lock */
        if (!efx->port_enabled)
                return;

        /* Disable interrupts and wait for ISR to complete */
        falcon_disable_interrupts(efx);
        if (efx->legacy_irq)
                synchronize_irq(efx->legacy_irq);
        efx_for_each_channel(channel, efx) {
                if (channel->irq)
                        synchronize_irq(channel->irq);
        }

        /* Stop all NAPI processing and synchronous rx refills */
        efx_for_each_channel(channel, efx)
                efx_stop_channel(channel);

        /* Stop all asynchronous port reconfigurations. Since all
         * event processing has already been stopped, there is no
         * window to lose phy events */
        efx_stop_port(efx);

        /* Flush reconfigure_work, refill_workqueue, monitor_work */
        efx_flush_all(efx);

        /* Isolate the MAC from the TX and RX engines, so that queue
         * flushes will complete in a timely fashion. */
        falcon_deconfigure_mac_wrapper(efx);
        falcon_drain_tx_fifo(efx);

        /* Stop the kernel transmit interface late, so the watchdog
         * timer isn't ticking over the flush */
        if (efx_dev_registered(efx)) {
                efx_stop_queue(efx);
                netif_tx_lock_bh(efx->net_dev);
                netif_tx_unlock_bh(efx->net_dev);
        }
}
static void efx_remove_all(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_remove_channel(channel);
        efx_remove_port(efx);
        efx_remove_nic(efx);
}
/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
        EFX_ASSERT_RESET_SERIALISED(efx);

        efx_stop_all(efx);

        efx_fini_channels(efx);
        efx_init_channels(efx);

        efx_start_all(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        EFX_ASSERT_RESET_SERIALISED(efx);

        efx_for_each_tx_queue(tx_queue, efx)
                tx_queue->channel->irq_moderation = tx_usecs;

        efx_for_each_rx_queue(rx_queue, efx)
                rx_queue->channel->irq_moderation = rx_usecs;
}
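
/* Note that moderation is stored per channel, so when a TX queue and an RX
 * queue share a channel the RX value (applied second above) wins. After
 * module load the equivalent knobs are the ethtool coalescing settings,
 * e.g. "ethtool -C eth0 rx-usecs 60 tx-usecs 150" (hypothetical interface
 * name, assuming the coalesce hooks in this driver's ethtool.c). */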
/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/
/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic,
                                           monitor_work.work);
        int rc = 0;

        EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
                  raw_smp_processor_id());

        /* If the mac_lock is already held then it is likely a port
         * reconfiguration is already in place, which will likely do
         * most of the work of check_hw() anyway. */
        if (!mutex_trylock(&efx->mac_lock)) {
                queue_delayed_work(efx->workqueue, &efx->monitor_work,
                                   efx_monitor_interval);
                return;
        }

        if (efx->port_enabled)
                rc = falcon_check_xmac(efx);
        mutex_unlock(&efx->mac_lock);

        if (rc) {
                if (monitor_reset) {
                        EFX_ERR(efx, "hardware monitor detected a fault: "
                                "triggering reset\n");
                        efx_schedule_reset(efx, RESET_TYPE_MONITOR);
                } else {
                        EFX_ERR(efx, "hardware monitor detected a fault, "
                                "skipping reset\n");
                }
        }

        queue_delayed_work(efx->workqueue, &efx->monitor_work,
                           efx_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/
/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        EFX_ASSERT_RESET_SERIALISED(efx);

        return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}
/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/
static int efx_init_napi(struct efx_nic *efx)
{
        struct efx_channel *channel;
        int rc;

        efx_for_each_channel(channel, efx) {
                channel->napi_dev = efx->net_dev;
                rc = efx_lro_init(&channel->lro_mgr, efx);
                if (rc)
                        goto err;
        }
        return 0;

 err:
        efx_fini_napi(efx);
        return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx) {
                efx_lro_fini(&channel->lro_mgr);
                channel->napi_dev = NULL;
        }
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/
#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/
/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        EFX_ASSERT_RESET_SERIALISED(efx);

        EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
                raw_smp_processor_id());

        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;

        efx_start_all(efx);
        return 0;
}
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
                raw_smp_processor_id());

        /* Stop the device and flush all the channels */
        efx_stop_all(efx);
        efx_fini_channels(efx);
        efx_init_channels(efx);

        return 0;
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_mac_stats *mac_stats = &efx->mac_stats;
        struct net_device_stats *stats = &net_dev->stats;

        /* Update stats if possible, but do not wait if another thread
         * is updating them (or resetting the NIC); slightly stale
         * stats are acceptable.
         */
        if (!spin_trylock(&efx->stats_lock))
                return stats;
        if (efx->stats_enabled) {
                falcon_update_stats_xmac(efx);
                falcon_update_nic_stats(efx);
        }
        spin_unlock(&efx->stats_lock);

        stats->rx_packets = mac_stats->rx_packets;
        stats->tx_packets = mac_stats->tx_packets;
        stats->rx_bytes = mac_stats->rx_bytes;
        stats->tx_bytes = mac_stats->tx_bytes;
        stats->multicast = mac_stats->rx_multicast;
        stats->collisions = mac_stats->tx_collision;
        stats->rx_length_errors = (mac_stats->rx_gtjumbo +
                                   mac_stats->rx_length_error);
        stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
        stats->rx_crc_errors = mac_stats->rx_bad;
        stats->rx_frame_errors = mac_stats->rx_align_error;
        stats->rx_fifo_errors = mac_stats->rx_overflow;
        stats->rx_missed_errors = mac_stats->rx_missed;
        stats->tx_window_errors = mac_stats->tx_late_collision;

        stats->rx_errors = (stats->rx_length_errors +
                            stats->rx_over_errors +
                            stats->rx_crc_errors +
                            stats->rx_frame_errors +
                            stats->rx_fifo_errors +
                            stats->rx_missed_errors +
                            mac_stats->rx_symbol_error);
        stats->tx_errors = (stats->tx_window_errors +
                            mac_stats->tx_bad);

        return stats;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
                atomic_read(&efx->netif_stop_count), efx->port_enabled,
                monitor_reset ? "resetting channels" : "skipping reset");

        if (monitor_reset)
                efx_schedule_reset(efx, RESET_TYPE_MONITOR);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc = 0;

        EFX_ASSERT_RESET_SERIALISED(efx);

        if (new_mtu > EFX_MAX_MTU)
                return -EINVAL;

        efx_stop_all(efx);

        EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

        efx_fini_channels(efx);
        net_dev->mtu = new_mtu;
        efx_init_channels(efx);

        efx_start_all(efx);
        return rc;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct sockaddr *addr = data;
        char *new_addr = addr->sa_data;

        EFX_ASSERT_RESET_SERIALISED(efx);

        if (!is_valid_ether_addr(new_addr)) {
                DECLARE_MAC_BUF(mac);
                EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
                        print_mac(mac, new_addr));
                return -EINVAL;
        }

        memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

        /* Reconfigure the MAC */
        efx_reconfigure_port(efx);

        return 0;
}
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct dev_mc_list *mc_list = net_dev->mc_list;
        union efx_multicast_hash *mc_hash = &efx->multicast_hash;
        bool promiscuous;
        u32 crc;
        int bit;
        int i;

        /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
        promiscuous = !!(net_dev->flags & IFF_PROMISC);
        if (efx->promiscuous != promiscuous) {
                efx->promiscuous = promiscuous;
                /* Close the window between efx_stop_port() and efx_flush_all()
                 * by only queuing work when the port is enabled. */
                if (efx->port_enabled)
                        queue_work(efx->workqueue, &efx->reconfigure_work);
        }

        /* Build multicast hash table */
        if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
                memset(mc_hash, 0xff, sizeof(*mc_hash));
        } else {
                memset(mc_hash, 0x00, sizeof(*mc_hash));
                for (i = 0; i < net_dev->mc_count; i++) {
                        crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
                        bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
                        set_bit_le(bit, mc_hash->byte);
                        mc_list = mc_list->next;
                }
        }

        /* Create and activate new global multicast hash table */
        falcon_set_multicast_hash(efx);
}
static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
{
        struct net_device *net_dev = ptr;

        if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
                struct efx_nic *efx = netdev_priv(net_dev);

                strcpy(efx->name, net_dev->name);
        }

        return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
        .notifier_call = efx_netdev_event,
};
static int efx_register_netdev(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        int rc;

        net_dev->watchdog_timeo = 5 * HZ;
        net_dev->irq = efx->pci_dev->irq;
        net_dev->open = efx_net_open;
        net_dev->stop = efx_net_stop;
        net_dev->get_stats = efx_net_stats;
        net_dev->tx_timeout = &efx_watchdog;
        net_dev->hard_start_xmit = efx_hard_start_xmit;
        net_dev->do_ioctl = efx_ioctl;
        net_dev->change_mtu = efx_change_mtu;
        net_dev->set_mac_address = efx_set_mac_address;
        net_dev->set_multicast_list = efx_set_multicast_list;
#ifdef CONFIG_NET_POLL_CONTROLLER
        net_dev->poll_controller = efx_netpoll;
#endif
        SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
        SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

        /* Always start with carrier off; PHY events will detect the link */
        netif_carrier_off(efx->net_dev);

        /* Clear MAC statistics */
        falcon_update_stats_xmac(efx);
        memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

        rc = register_netdev(net_dev);
        if (rc) {
                EFX_ERR(efx, "could not register net dev\n");
                return rc;
        }
        strcpy(efx->name, net_dev->name);

        return 0;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
        struct efx_tx_queue *tx_queue;

        if (!efx->net_dev)
                return;

        BUG_ON(netdev_priv(efx->net_dev) != efx);

        /* Free up any skbs still remaining. This has to happen before
         * we try to unregister the netdev as running their destructors
         * may be needed to get the device ref. count to 0. */
        efx_for_each_tx_queue(tx_queue, efx)
                efx_release_tx_buffers(tx_queue);

        if (efx_dev_registered(efx)) {
                strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
                unregister_netdev(efx->net_dev);
        }
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/
/* Tears down the entire software state and most of the hardware state
 * before reset.  */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
        int rc;

        EFX_ASSERT_RESET_SERIALISED(efx);

        /* The net_dev->get_stats handler is quite slow, and will fail
         * if a fetch is pending over reset. Serialise against it. */
        spin_lock(&efx->stats_lock);
        efx->stats_enabled = false;
        spin_unlock(&efx->stats_lock);

        efx_stop_all(efx);
        mutex_lock(&efx->mac_lock);

        rc = falcon_xmac_get_settings(efx, ecmd);
        if (rc)
                EFX_ERR(efx, "could not back up PHY settings\n");

        efx_fini_channels(efx);
}
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
{
        int rc;

        EFX_ASSERT_RESET_SERIALISED(efx);

        rc = falcon_init_nic(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise NIC\n");
                ok = false;
        }

        if (ok) {
                efx_init_channels(efx);

                if (falcon_xmac_set_settings(efx, ecmd))
                        EFX_ERR(efx, "could not restore PHY settings\n");
        }

        mutex_unlock(&efx->mac_lock);

        if (ok) {
                efx_start_all(efx);
                efx->stats_enabled = true;
        }
        return rc;
}
/* Reset the NIC as transparently as possible. Do not reset the PHY
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep.  You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
        struct ethtool_cmd ecmd;
        enum reset_type method = efx->reset_pending;
        int rc;

        /* Serialise with kernel interfaces */
        rtnl_lock();

        /* If we're not RUNNING then don't reset. Leave the reset_pending
         * flag set so that efx_pci_probe_main will be retried */
        if (efx->state != STATE_RUNNING) {
                EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
                goto unlock_rtnl;
        }

        EFX_INFO(efx, "resetting (%d)\n", method);

        efx_reset_down(efx, &ecmd);

        rc = falcon_reset_hw(efx, method);
        if (rc) {
                EFX_ERR(efx, "failed to reset hardware\n");
                goto fail;
        }

        /* Allow resets to be rescheduled. */
        efx->reset_pending = RESET_TYPE_NONE;

        /* Reinitialise bus-mastering, which may have been turned off before
         * the reset was scheduled. This is still appropriate, even in the
         * RESET_TYPE_DISABLE since this driver generally assumes the hardware
         * can respond to requests. */
        pci_set_master(efx->pci_dev);

        /* Leave device stopped if necessary */
        if (method == RESET_TYPE_DISABLE) {
                rc = -EIO;
                goto fail;
        }

        rc = efx_reset_up(efx, &ecmd, true);
        if (rc)
                goto disable;

        EFX_LOG(efx, "reset complete\n");
 unlock_rtnl:
        rtnl_unlock();
        return 0;

 fail:
        efx_reset_up(efx, &ecmd, false);
 disable:
        EFX_ERR(efx, "has been disabled\n");
        efx->state = STATE_DISABLED;

        rtnl_unlock();
        efx_unregister_netdev(efx);
        efx_fini_port(efx);
        return rc;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
        struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

        efx_reset(nic);
}
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
        enum reset_type method;

        if (efx->reset_pending != RESET_TYPE_NONE) {
                EFX_INFO(efx, "quenching already scheduled reset\n");
                return;
        }

        switch (type) {
        case RESET_TYPE_INVISIBLE:
        case RESET_TYPE_ALL:
        case RESET_TYPE_WORLD:
        case RESET_TYPE_DISABLE:
                method = type;
                break;
        case RESET_TYPE_RX_RECOVERY:
        case RESET_TYPE_RX_DESC_FETCH:
        case RESET_TYPE_TX_DESC_FETCH:
        case RESET_TYPE_TX_SKIP:
                method = RESET_TYPE_INVISIBLE;
                break;
        default:
                method = RESET_TYPE_ALL;
        }

        if (method != type)
                EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
        else
                EFX_LOG(efx, "scheduling reset (%d)\n", method);

        efx->reset_pending = method;

        queue_work(efx->reset_workqueue, &efx->reset_work);
}
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/
/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
        {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
         .driver_data = (unsigned long) &falcon_a_nic_type},
        {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
         .driver_data = (unsigned long) &falcon_b_nic_type},
        {0}                     /* end of list */
};
/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
        return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
static struct efx_phy_operations efx_dummy_phy_operations = {
        .init = efx_port_dummy_op_int,
        .reconfigure = efx_port_dummy_op_void,
        .check_hw = efx_port_dummy_op_int,
        .fini = efx_port_dummy_op_void,
        .clear_interrupt = efx_port_dummy_op_void,
        .reset_xaui = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
        .init = efx_port_dummy_op_int,
        .init_leds = efx_port_dummy_op_int,
        .set_fault_led = efx_port_dummy_op_blink,
        .blink = efx_port_dummy_op_blink,
        .fini = efx_port_dummy_op_void,
};
/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/
/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        int i, rc;

        /* Initialise common structures */
        memset(efx, 0, sizeof(*efx));
        spin_lock_init(&efx->biu_lock);
        spin_lock_init(&efx->phy_lock);
        INIT_WORK(&efx->reset_work, efx_reset_work);
        INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
        efx->pci_dev = pci_dev;
        efx->state = STATE_INIT;
        efx->reset_pending = RESET_TYPE_NONE;
        strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
        efx->board_info = efx_dummy_board_info;

        efx->net_dev = net_dev;
        efx->rx_checksum_enabled = true;
        spin_lock_init(&efx->netif_stop_lock);
        spin_lock_init(&efx->stats_lock);
        mutex_init(&efx->mac_lock);
        efx->phy_op = &efx_dummy_phy_operations;
        efx->mii.dev = net_dev;
        INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
        atomic_set(&efx->netif_stop_count, 1);

        for (i = 0; i < EFX_MAX_CHANNELS; i++) {
                channel = &efx->channel[i];
                channel->efx = efx;
                channel->channel = i;
                channel->work_pending = false;
        }
        for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
                tx_queue = &efx->tx_queue[i];
                tx_queue->efx = efx;
                tx_queue->queue = i;
                tx_queue->buffer = NULL;
                tx_queue->channel = &efx->channel[0]; /* for safety */
                tx_queue->tso_headers_free = NULL;
        }
        for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
                rx_queue = &efx->rx_queue[i];
                rx_queue->efx = efx;
                rx_queue->queue = i;
                rx_queue->channel = &efx->channel[0]; /* for safety */
                rx_queue->buffer = NULL;
                spin_lock_init(&rx_queue->add_lock);
                INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
        }

        efx->type = type;

        /* Sanity-check NIC type */
        EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
                            (efx->type->txd_ring_mask + 1));
        EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
                            (efx->type->rxd_ring_mask + 1));
        EFX_BUG_ON_PARANOID(efx->type->evq_size &
                            (efx->type->evq_size - 1));
        /* As close as we can get to guaranteeing that we don't overflow */
        EFX_BUG_ON_PARANOID(efx->type->evq_size <
                            (efx->type->txd_ring_mask + 1 +
                             efx->type->rxd_ring_mask + 1));
        EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

        /* Higher numbered interrupt modes are less capable! */
        efx->interrupt_mode = max(efx->type->max_interrupt_mode,
                                  interrupt_mode);

        efx->workqueue = create_singlethread_workqueue("sfc_work");
        if (!efx->workqueue) {
                rc = -ENOMEM;
                goto fail1;
        }

        efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
        if (!efx->reset_workqueue) {
                rc = -ENOMEM;
                goto fail2;
        }

        return 0;

 fail2:
        destroy_workqueue(efx->workqueue);
        efx->workqueue = NULL;

 fail1:
        return rc;
}
static void efx_fini_struct(struct efx_nic *efx)
{
        if (efx->reset_workqueue) {
                destroy_workqueue(efx->reset_workqueue);
                efx->reset_workqueue = NULL;
        }
        if (efx->workqueue) {
                destroy_workqueue(efx->workqueue);
                efx->workqueue = NULL;
        }
}
/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/
/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
        EFX_ASSERT_RESET_SERIALISED(efx);

        /* Skip everything if we never obtained a valid membase */
        if (!efx->membase)
                return;

        efx_fini_channels(efx);
        efx_fini_port(efx);

        /* Shutdown the board, then the NIC and board state */
        efx->board_info.fini(efx);
        falcon_fini_interrupt(efx);

        efx_fini_napi(efx);
        efx_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
        struct efx_nic *efx;

        efx = pci_get_drvdata(pci_dev);
        if (!efx)
                return;

        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
        efx->state = STATE_FINI;
        dev_close(efx->net_dev);

        /* Allow any queued efx_resets() to complete */
        rtnl_unlock();

        if (efx->membase == NULL)
                goto out;

        efx_unregister_netdev(efx);

        /* Wait for any scheduled resets to complete. No more will be
         * scheduled from this point because efx_stop_all() has been
         * called, we are no longer registered with driverlink, and
         * the net_device's have been removed. */
        flush_workqueue(efx->reset_workqueue);

        efx_pci_remove_main(efx);

 out:
        efx_fini_io(efx);
        EFX_LOG(efx, "shutdown successful\n");

        pci_set_drvdata(pci_dev, NULL);
        efx_fini_struct(efx);
        free_netdev(efx->net_dev);
}
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
        int rc;

        /* Do start-of-day initialisation */
        rc = efx_probe_all(efx);
        if (rc)
                goto fail1;

        rc = efx_init_napi(efx);
        if (rc)
                goto fail2;

        /* Initialise the board */
        rc = efx->board_info.init(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise board\n");
                goto fail3;
        }

        rc = falcon_init_nic(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise NIC\n");
                goto fail4;
        }

        rc = efx_init_port(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise port\n");
                goto fail5;
        }

        efx_init_channels(efx);

        rc = falcon_init_interrupt(efx);
        if (rc)
                goto fail6;

        return 0;

 fail6:
        efx_fini_channels(efx);
        efx_fini_port(efx);
 fail5:
 fail4:
        efx->board_info.fini(efx);
 fail3:
        efx_fini_napi(efx);
 fail2:
        efx_remove_all(efx);
 fail1:
        return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   const struct pci_device_id *entry)
{
        struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int i, rc;

        /* Allocate and initialise a struct net_device and struct efx_nic */
        net_dev = alloc_etherdev(sizeof(*efx));
        if (!net_dev)
                return -ENOMEM;
        net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
                              NETIF_F_HIGHDMA | NETIF_F_TSO);
        if (lro)
                net_dev->features |= NETIF_F_LRO;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_TSO);
        efx = netdev_priv(net_dev);
        pci_set_drvdata(pci_dev, efx);
        rc = efx_init_struct(efx, type, pci_dev, net_dev);
        if (rc)
                goto fail1;

        EFX_INFO(efx, "Solarflare Communications NIC detected\n");

        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx);
        if (rc)
                goto fail2;

        /* No serialisation is required with the reset path because
         * we're in STATE_INIT. */
        for (i = 0; i < 5; i++) {
                rc = efx_pci_probe_main(efx);
                if (rc == 0)
                        break;

                /* Serialise against efx_reset(). No more resets will be
                 * scheduled since efx_stop_all() has been called, and we
                 * have not and never have been registered with either
                 * the rtnetlink or driverlink layers. */
                flush_workqueue(efx->reset_workqueue);

                /* Retry if a recoverable reset event has been scheduled */
                if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
                    (efx->reset_pending != RESET_TYPE_ALL))
                        goto fail3;

                efx->reset_pending = RESET_TYPE_NONE;
        }

        if (rc) {
                EFX_ERR(efx, "Could not reset NIC\n");
                goto fail4;
        }

        /* Switch to the running state before we expose the device to
         * the OS.  This is to ensure that the initial gathering of
         * MAC stats succeeds. */
        rtnl_lock();
        efx->state = STATE_RUNNING;
        rtnl_unlock();

        rc = efx_register_netdev(efx);
        if (rc)
                goto fail5;

        EFX_LOG(efx, "initialisation successful\n");

        return 0;

 fail5:
        efx_pci_remove_main(efx);
 fail4:
 fail3:
        efx_fini_io(efx);
 fail2:
        efx_fini_struct(efx);
 fail1:
        EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
        return rc;
}
static struct pci_driver efx_pci_driver = {
        .name = EFX_DRIVER_NAME,
        .id_table = efx_pci_table,
        .probe = efx_pci_probe,
        .remove = efx_pci_remove,
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/
module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
                 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
static int __init efx_init_module(void)
{
        int rc;

        printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

        rc = register_netdevice_notifier(&efx_netdev_notifier);
        if (rc)
                goto err_notifier;

        refill_workqueue = create_workqueue("sfc_refill");
        if (!refill_workqueue) {
                rc = -ENOMEM;
                goto err_refill;
        }

        rc = pci_register_driver(&efx_pci_driver);
        if (rc < 0)
                goto err_pci;

        return 0;

 err_pci:
        destroy_workqueue(refill_workqueue);
 err_refill:
        unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
        return rc;
}
static void __exit efx_exit_module(void)
{
        printk(KERN_INFO "Solarflare NET driver unloading\n");

        pci_unregister_driver(&efx_pci_driver);
        destroy_workqueue(refill_workqueue);
        unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
              "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);