cxgb: delete non NAPI code from the driver.
author		Francois Romieu <romieu@fr.zoreil.com>
		Thu, 10 Jul 2008 22:29:19 +0000 (00:29 +0200)
committer	Jeff Garzik <jgarzik@redhat.com>
		Fri, 11 Jul 2008 05:11:42 +0000 (01:11 -0400)
Compile-tested only.

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
drivers/net/Kconfig
drivers/net/chelsio/cxgb2.c
drivers/net/chelsio/sge.c

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4675c1bd6fb9045db6f1bb6ea2ebba91205f34a1..50ca1cf1271ea729bea8031d5a597e28d5617544 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2378,14 +2378,6 @@ config CHELSIO_T1_1G
           Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
           are using only 10G cards say 'N' here.
 
-config CHELSIO_T1_NAPI
-       bool "Use Rx Polling (NAPI)"
-       depends on CHELSIO_T1
-       default y
-       help
-         NAPI is a driver API designed to reduce CPU and interrupt load
-         when the driver is receiving lots of packets from the card.
-
 config CHELSIO_T3
        tristate "Chelsio Communications T3 10Gb Ethernet support"
        depends on PCI && INET
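The help text removed above is the whole point of this patch: with NAPI the driver disables the RX interrupt and processes received packets by polling from softirq context until the backlog is drained, rather than taking one interrupt per packet. A minimal sketch of the poll callback shape this now-mandatory path uses, with hypothetical my_* names rather than the driver's own code:

static int my_poll(struct napi_struct *napi, int budget)
{
	/* Assumes struct my_adapter embeds a struct napi_struct named napi. */
	struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
	int work_done;

	/* Process at most 'budget' packets in softirq context. */
	work_done = my_process_rx(adapter, budget);

	if (work_done < budget) {
		/* Backlog drained: leave polling mode and re-enable the RX
		 * interrupt so the next packet schedules the poll again. */
		napi_complete(napi);
		my_enable_rx_interrupt(adapter);
	}
	return work_done;
}

t1_poll() in sge.c (touched further down) follows this general pattern, with process_responses() doing the actual work.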
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index a509337eab2dd44d562e131f814599138d133dfe..638c9a27a7a64902f00dd2251382ed4ff8b1de55 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -1153,9 +1153,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = t1_netpoll;
 #endif
-#ifdef CONFIG_CHELSIO_T1_NAPI
                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
-#endif
 
                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }
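With the Kconfig option gone, the netif_napi_add() registration in init_one() is unconditional. The calls that pair with it are not part of this hunk; as a rough sketch (hypothetical my_* names, same four-argument netif_napi_add() API shown above), the open/stop paths look like:

static int my_open(struct net_device *netdev)
{
	struct my_adapter *adapter = netdev_priv(netdev);

	/* Probe already did: netif_napi_add(netdev, &adapter->napi, my_poll, 64);
	 * Opening the interface arms the poll callback and the RX path. */
	napi_enable(&adapter->napi);
	my_enable_rx_interrupt(adapter);
	netif_start_queue(netdev);
	return 0;
}

static int my_stop(struct net_device *netdev)
{
	struct my_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);
	my_disable_rx_interrupt(adapter);
	/* Waits for an in-progress poll and prevents further scheduling. */
	napi_disable(&adapter->napi);
	return 0;
}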
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8a7efd38e95bba6ce63d9d1f0efbd5a7dd380838..d6c7d2aa761b503da15a96126ee1f2f3895e4f0a 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1396,20 +1396,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 
        if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
                st->vlan_xtract++;
-#ifdef CONFIG_CHELSIO_T1_NAPI
-                       vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
-                                                ntohs(p->vlan));
-#else
-                       vlan_hwaccel_rx(skb, adapter->vlan_grp,
-                                       ntohs(p->vlan));
-#endif
-       } else {
-#ifdef CONFIG_CHELSIO_T1_NAPI
+               vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+                                        ntohs(p->vlan));
+       } else
                netif_receive_skb(skb);
-#else
-               netif_rx(skb);
-#endif
-       }
 }
 
 /*
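sge_rx() now has a single delivery path because a NAPI driver always hands packets up from poll (softirq) context, where netif_receive_skb() and vlan_hwaccel_receive_skb() may be called directly; the removed netif_rx()/vlan_hwaccel_rx() variants are meant for hard-interrupt context and only made sense while the non-NAPI build was possible. Restated as a free-standing sketch (hypothetical my_deliver() wrapper, old vlan_group API of this kernel generation):

static void my_deliver(struct my_adapter *adapter, struct sk_buff *skb,
		       u16 vlan_tag, bool vlan_valid)
{
	/* Called only from the NAPI poll handler, i.e. softirq context. */
	if (unlikely(adapter->vlan_grp && vlan_valid))
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vlan_tag);
	else
		netif_receive_skb(skb);
}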
@@ -1568,7 +1558,6 @@ static inline int responses_pending(const struct adapter *adapter)
        return (e->GenerationBit == Q->genbit);
 }
 
-#ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
  * non data-carrying) responses.  Such respones are too light-weight to justify
@@ -1636,9 +1625,6 @@ int t1_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-/*
- * NAPI version of the main interrupt handler.
- */
 irqreturn_t t1_interrupt(int irq, void *data)
 {
        struct adapter *adapter = data;
@@ -1656,7 +1642,8 @@ irqreturn_t t1_interrupt(int irq, void *data)
                        else {
                                /* no data, no NAPI needed */
                                writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-                               napi_enable(&adapter->napi);    /* undo schedule_prep */
+                               /* undo schedule_prep */
+                               napi_enable(&adapter->napi);
                        }
                }
                return IRQ_HANDLED;
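The hunk above is the subtle half of the NAPI handshake in t1_interrupt(): napi_schedule_prep() atomically claims the NAPI instance before the handler knows whether there is data, and when only pure (non data-carrying) responses are pending there is nothing for t1_poll() to do, so the claimed state is released with napi_enable() instead of firing the poll. A hedged sketch of that pattern, with hypothetical my_* names:

static irqreturn_t my_interrupt(int irq, void *data)
{
	struct my_adapter *adapter = data;

	if (!my_data_interrupt_pending(adapter))
		return IRQ_NONE;

	my_ack_data_interrupt(adapter);

	if (napi_schedule_prep(&adapter->napi)) {
		if (my_rx_data_pending(adapter)) {
			/* Real work queued: run the poll callback in softirq. */
			__napi_schedule(&adapter->napi);
		} else {
			/* Nothing to poll: undo schedule_prep. */
			napi_enable(&adapter->napi);
		}
	}
	return IRQ_HANDLED;
}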
@@ -1672,53 +1659,6 @@ irqreturn_t t1_interrupt(int irq, void *data)
        return IRQ_RETVAL(handled != 0);
 }
 
-#else
-/*
- * Main interrupt handler, optimized assuming that we took a 'DATA'
- * interrupt.
- *
- * 1. Clear the interrupt
- * 2. Loop while we find valid descriptors and process them; accumulate
- *      information that can be processed after the loop
- * 3. Tell the SGE at which index we stopped processing descriptors
- * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
- *      outstanding TX buffers waiting, replenish RX buffers, potentially
- *      reenable upper layers if they were turned off due to lack of TX
- *      resources which are available again.
- * 5. If we took an interrupt, but no valid respQ descriptors was found we
- *      let the slow_intr_handler run and do error handling.
- */
-irqreturn_t t1_interrupt(int irq, void *cookie)
-{
-       int work_done;
-       struct adapter *adapter = cookie;
-       struct respQ *Q = &adapter->sge->respQ;
-
-       spin_lock(&adapter->async_lock);
-
-       writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
-
-       if (likely(responses_pending(adapter)))
-               work_done = process_responses(adapter, -1);
-       else
-               work_done = t1_slow_intr_handler(adapter);
-
-       /*
-        * The unconditional clearing of the PL_CAUSE above may have raced
-        * with DMA completion and the corresponding generation of a response
-        * to cause us to miss the resulting data interrupt.  The next write
-        * is also unconditional to recover the missed interrupt and render
-        * this race harmless.
-        */
-       writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
-
-       if (!work_done)
-               adapter->sge->stats.unhandled_irqs++;
-       spin_unlock(&adapter->async_lock);
-       return IRQ_RETVAL(work_done != 0);
-}
-#endif
-
 /*
  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
  *