[netdrvr] fix array overflows in Chelsio driver
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index bcf8b1e939b00ede6a32651d893a725b618c7941..30ff8ea1a402602761bb75994a092b759c662835 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1,8 +1,8 @@
 /*****************************************************************************
  *                                                                           *
  * File: sge.c                                                               *
- * $Revision: 1.13 $                                                         *
- * $Date: 2005/03/23 07:41:27 $                                              *
+ * $Revision: 1.26 $                                                         *
+ * $Date: 2005/06/21 18:29:48 $                                              *
  * Description:                                                              *
  *  DMA engine.                                                              *
  *  part of the Chelsio 10Gb Ethernet Driver.                                *
 #include "regs.h"
 #include "espi.h"
 
+
+#ifdef NETIF_F_TSO
 #include <linux/tcp.h>
+#endif
 
 #define SGE_CMDQ_N             2
 #define SGE_FREELQ_N           2
-#define SGE_CMDQ0_E_N          512
+#define SGE_CMDQ0_E_N          1024
 #define SGE_CMDQ1_E_N          128
 #define SGE_FREEL_SIZE         4096
 #define SGE_JUMBO_FREEL_SIZE   512
 #define SGE_FREEL_REFILL_THRESH        16
 #define SGE_RESPQ_E_N          1024
-#define SGE_INTR_BUCKETSIZE    100
-#define SGE_INTR_LATBUCKETS    5
-#define SGE_INTR_MAXBUCKETS    11
-#define SGE_INTRTIMER0         1
-#define SGE_INTRTIMER1         50
-#define SGE_INTRTIMER_NRES     10000
-#define SGE_RX_COPY_THRESHOLD  256
+#define SGE_INTRTIMER_NRES     1000
+#define SGE_RX_COPY_THRES      256
 #define SGE_RX_SM_BUF_SIZE     1536
 
-#define SGE_RESPQ_REPLENISH_THRES ((3 * SGE_RESPQ_E_N) / 4)
+# define SGE_RX_DROP_THRES 2
+
+#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
+
+/*
+ * Period of the TX buffer reclaim timer.  This timer does not need to run
+ * frequently as TX buffers are usually reclaimed by new TX packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
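/*
 * A minimal sketch (hypothetical handler name, not part of this patch) of
 * how such a reclaim timer is typically driven with the 2.6-era timer API
 * this driver already uses for its ESPI workaround timer: the handler frees
 * completed TX descriptors and then re-arms itself.
 */
static void tx_reclaim_timer_cb_example(unsigned long data)
{
	struct sge *sge = (struct sge *)data;

	/* ... reclaim completed cmdQ descriptors here ... */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Armed once at start-of-day, e.g.:
 *	init_timer(&sge->tx_reclaim_timer);
 *	sge->tx_reclaim_timer.function = tx_reclaim_timer_cb_example;
 *	sge->tx_reclaim_timer.data = (unsigned long)sge;
 *	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
 */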
 
-#define SGE_RX_OFFSET 2
 #ifndef NET_IP_ALIGN
-# define NET_IP_ALIGN SGE_RX_OFFSET
+# define NET_IP_ALIGN 2
 #endif
 
+#define M_CMD_LEN       0x7fffffff
+#define V_CMD_LEN(v)    (v)
+#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
+#define V_CMD_GEN1(v)   ((v) << 31)
+#define V_CMD_GEN2(v)   (v)
+#define F_CMD_DATAVALID (1 << 1)
+#define F_CMD_SOP       (1 << 2)
+#define V_CMD_EOP(v)    ((v) << 3)
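/*
 * Worked example (assuming genbit == 1 and a single-fragment, 1536-byte
 * packet):  len_gen = V_CMD_LEN(1536) | V_CMD_GEN1(1) == 0x80000600, and
 * flags    = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(1) | V_CMD_GEN2(1)
 *          == 0x0000000f.
 * The hardware treats a descriptor as valid only when both generation bits
 * (GEN1 in len_gen, GEN2 in flags/gen2) match the ring's current generation.
 */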
+
 /*
- * Memory Mapped HW Command, Freelist and Response Queue Descriptors
+ * Command queue, receive buffer list, and response queue descriptors.
  */
 #if defined(__BIG_ENDIAN_BITFIELD)
 struct cmdQ_e {
-       u32 AddrLow;
-       u32 GenerationBit       : 1;
-       u32 BufferLength        : 31;
-       u32 RespQueueSelector   : 4;
-       u32 ResponseTokens      : 12;
-       u32 CmdId               : 8;
-       u32 Reserved            : 3;
-       u32 TokenValid          : 1;
-       u32 Eop                 : 1;
-       u32 Sop                 : 1;
-       u32 DataValid           : 1;
-       u32 GenerationBit2      : 1;
-       u32 AddrHigh;
+       u32 addr_lo;
+       u32 len_gen;
+       u32 flags;
+       u32 addr_hi;
 };
 
 struct freelQ_e {
-       u32 AddrLow;
-       u32 GenerationBit       : 1;
-       u32 BufferLength        : 31;
-       u32 Reserved            : 31;
-       u32 GenerationBit2      : 1;
-       u32 AddrHigh;
+       u32 addr_lo;
+       u32 len_gen;
+       u32 gen2;
+       u32 addr_hi;
 };
 
 struct respQ_e {
@@ -128,31 +131,19 @@ struct respQ_e {
        u32 GenerationBit       : 1;
        u32 BufferLength;
 };
-
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
 struct cmdQ_e {
-       u32 BufferLength        : 31;
-       u32 GenerationBit       : 1;
-       u32 AddrLow;
-       u32 AddrHigh;
-       u32 GenerationBit2      : 1;
-       u32 DataValid           : 1;
-       u32 Sop                 : 1;
-       u32 Eop                 : 1;
-       u32 TokenValid          : 1;
-       u32 Reserved            : 3;
-       u32 CmdId               : 8;
-       u32 ResponseTokens      : 12;
-       u32 RespQueueSelector   : 4;
+       u32 len_gen;
+       u32 addr_lo;
+       u32 addr_hi;
+       u32 flags;
 };
 
 struct freelQ_e {
-       u32 BufferLength        : 31;
-       u32 GenerationBit       : 1;
-       u32 AddrLow;
-       u32 AddrHigh;
-       u32 GenerationBit2      : 1;
-       u32 Reserved            : 31;
+       u32 len_gen;
+       u32 addr_lo;
+       u32 addr_hi;
+       u32 gen2;
 };
 
 struct respQ_e {
@@ -179,7 +170,6 @@ struct cmdQ_ce {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(dma_addr);
        DECLARE_PCI_UNMAP_LEN(dma_len);
-       unsigned int single;
 };
 
 struct freelQ_ce {
@@ -189,44 +179,52 @@ struct freelQ_ce {
 };
 
 /*
- * SW Command, Freelist and Response Queue
+ * SW command, freelist and response rings
  */
 struct cmdQ {
-       atomic_t        asleep;         /* HW DMA Fetch status */
-       atomic_t        credits;        /* # available descriptors for TX */
-       atomic_t        pio_pidx;       /* Variable updated on Doorbell */
-       u16             entries_n;      /* # descriptors for TX */
-       u16             pidx;           /* producer index (SW) */
-       u16             cidx;           /* consumer index (HW) */
-       u8              genbit;         /* current generation (=valid) bit */
-       struct cmdQ_e  *entries;        /* HW command descriptor Q */
-       struct cmdQ_ce *centries;       /* SW command context descriptor Q */
-       spinlock_t      Qlock;          /* Lock to protect cmdQ enqueuing */
-       dma_addr_t      dma_addr;       /* DMA addr HW command descriptor Q */
+       unsigned long   status;         /* HW DMA fetch status */
+       unsigned int    in_use;         /* # of in-use command descriptors */
+       unsigned int    size;           /* # of descriptors */
+       unsigned int    processed;      /* total # of descs HW has processed */
+       unsigned int    cleaned;        /* total # of descs SW has reclaimed */
+       unsigned int    stop_thres;     /* SW TX queue suspend threshold */
+       u16             pidx;           /* producer index (SW) */
+       u16             cidx;           /* consumer index (HW) */
+       u8              genbit;         /* current generation (=valid) bit */
+       u8              sop;            /* is next entry start of packet? */
+       struct cmdQ_e  *entries;        /* HW command descriptor Q */
+       struct cmdQ_ce *centries;       /* SW command context descriptor Q */
+       spinlock_t      lock;           /* Lock to protect cmdQ enqueuing */
+       dma_addr_t      dma_addr;       /* DMA addr HW command descriptor Q */
 };
 
 struct freelQ {
-       unsigned int    credits;        /* # of available RX buffers */
-       unsigned int    entries_n;      /* free list capacity */
-       u16             pidx;           /* producer index (SW) */
-       u16             cidx;           /* consumer index (HW) */
+       unsigned int    credits;        /* # of available RX buffers */
+       unsigned int    size;           /* free list capacity */
+       u16             pidx;           /* producer index (SW) */
+       u16             cidx;           /* consumer index (HW) */
        u16             rx_buffer_size; /* Buffer size on this free list */
        u16             dma_offset;     /* DMA offset to align IP headers */
-       u8              genbit;         /* current generation (=valid) bit */
-       struct freelQ_e *entries;       /* HW freelist descriptor Q */
-       struct freelQ_ce *centries;     /* SW freelist conext descriptor Q */
-       dma_addr_t      dma_addr;       /* DMA addr HW freelist descriptor Q */
+       u16             recycleq_idx;   /* skb recycle q to use */
+       u8              genbit;         /* current generation (=valid) bit */
+       struct freelQ_e *entries;       /* HW freelist descriptor Q */
+       struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
+       dma_addr_t      dma_addr;       /* DMA addr HW freelist descriptor Q */
 };
 
 struct respQ {
-       u16             credits;        /* # of available respQ descriptors */
-       u16             credits_pend;   /* # of not yet returned descriptors */
-       u16             entries_n;      /* # of response Q descriptors */
-       u16             pidx;           /* producer index (HW) */
-       u16             cidx;           /* consumer index (SW) */
-       u8              genbit;         /* current generation(=valid) bit */
+       unsigned int    credits;        /* credits to be returned to SGE */
+       unsigned int    size;           /* # of response Q descriptors */
+       u16             cidx;           /* consumer index (SW) */
+       u8              genbit;         /* current generation(=valid) bit */
        struct respQ_e *entries;        /* HW response descriptor Q */
-       dma_addr_t      dma_addr;       /* DMA addr HW response descriptor Q */
+       dma_addr_t      dma_addr;       /* DMA addr HW response descriptor Q */
+};
+
+/* Bit flags for cmdQ.status */
+enum {
+       CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
+       CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
 };
 
 /*
@@ -239,134 +237,50 @@ struct respQ {
  */
 struct sge {
        struct adapter *adapter;        /* adapter backpointer */
-       struct freelQ   freelQ[SGE_FREELQ_N]; /* freelist Q(s) */
-       struct respQ    respQ;          /* response Q instatiation */
+       struct net_device *netdev;      /* netdevice backpointer */
+       struct freelQ   freelQ[SGE_FREELQ_N]; /* buffer free lists */
+       struct respQ    respQ;          /* response Q */
+       unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
        unsigned int    rx_pkt_pad;     /* RX padding for L2 packets */
        unsigned int    jumbo_fl;       /* jumbo freelist Q index */
-       u32             intrtimer[SGE_INTR_MAXBUCKETS]; /* ! */
-       u32             currIndex;      /* current index into intrtimer[] */
-       u32             intrtimer_nres; /* no resource interrupt timer value */
-       u32             sge_control;    /* shadow content of sge control reg */
-       struct sge_intr_counts intr_cnt;
-       struct timer_list ptimer;
-       struct sk_buff  *pskb;
-       u32             ptimeout;
-       struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned; /* command Q(s)*/
+       unsigned int    intrtimer_nres; /* no-resource interrupt timer */
+       unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
+       struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+       struct timer_list espibug_timer;
+       unsigned int    espibug_timeout;
+       struct sk_buff  *espibug_skb;
+       u32             sge_control;    /* shadow value of sge control reg */
+       struct sge_intr_counts stats;
+       struct sge_port_stats port_stats[MAX_NPORTS];
+       struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
 };
 
-static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
-                       unsigned int qid);
-
 /*
  * PIO to indicate that memory mapped Q contains valid descriptor(s).
  */
-static inline void doorbell_pio(struct sge *sge, u32 val)
+static inline void doorbell_pio(struct adapter *adapter, u32 val)
 {
        wmb();
-       t1_write_reg_4(sge->adapter, A_SG_DOORBELL, val);
-}
-
-/*
- * Disables the DMA engine.
- */
-void t1_sge_stop(struct sge *sge)
-{
-       t1_write_reg_4(sge->adapter, A_SG_CONTROL, 0);
-       t1_read_reg_4(sge->adapter, A_SG_CONTROL);     /* flush write */
-       if (is_T2(sge->adapter))
-               del_timer_sync(&sge->ptimer);
-}
-
-static u8 ch_mac_addr[ETH_ALEN] = {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
-static void t1_espi_workaround(void *data)
-{
-       struct adapter *adapter = (struct adapter *)data;
-       struct sge *sge = adapter->sge;
-
-       if (netif_running(adapter->port[0].dev) &&
-               atomic_read(&sge->cmdQ[0].asleep)) {
-
-               u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
-
-               if ((seop & 0xfff0fff) == 0xfff && sge->pskb) {
-                       struct sk_buff *skb = sge->pskb;
-                       if (!skb->cb[0]) {
-                               memcpy(skb->data+sizeof(struct cpl_tx_pkt), ch_mac_addr, ETH_ALEN);
-                               memcpy(skb->data+skb->len-10, ch_mac_addr, ETH_ALEN);
-
-                               skb->cb[0] = 0xff;
-                       }
-                       t1_sge_tx(skb, adapter,0);
-               }
-       }
-       mod_timer(&adapter->sge->ptimer, jiffies + sge->ptimeout);
-}
-
-/*
- * Enables the DMA engine.
- */
-void t1_sge_start(struct sge *sge)
-{
-       t1_write_reg_4(sge->adapter, A_SG_CONTROL, sge->sge_control);
-       t1_read_reg_4(sge->adapter, A_SG_CONTROL);     /* flush write */
-       if (is_T2(sge->adapter)) {
-               init_timer(&sge->ptimer);
-               sge->ptimer.function = (void *)&t1_espi_workaround;
-               sge->ptimer.data = (unsigned long)sge->adapter;
-               sge->ptimer.expires = jiffies + sge->ptimeout;
-               add_timer(&sge->ptimer);
-       }
-}
-
-/*
- * Creates a t1_sge structure and returns suggested resource parameters.
- */
-struct sge * __devinit t1_sge_create(struct adapter *adapter,
-                                    struct sge_params *p)
-{
-       struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
-
-       if (!sge)
-               return NULL;
-       memset(sge, 0, sizeof(*sge));
-
-       if (is_T2(adapter))
-               sge->ptimeout = 1;      /* finest allowed */
-
-       sge->adapter = adapter;
-       sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : SGE_RX_OFFSET;
-       sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
-
-       p->cmdQ_size[0] = SGE_CMDQ0_E_N;
-       p->cmdQ_size[1] = SGE_CMDQ1_E_N;
-       p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
-       p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
-       p->rx_coalesce_usecs = SGE_INTRTIMER1;
-       p->last_rx_coalesce_raw = SGE_INTRTIMER1 *
-         (board_info(sge->adapter)->clock_core / 1000000);
-       p->default_rx_coalesce_usecs = SGE_INTRTIMER1;
-       p->coalesce_enable = 0; /* Turn off adaptive algorithm by default */
-       p->sample_interval_usecs = 0;
-       return sge;
+       writel(val, adapter->regs + A_SG_DOORBELL);
 }
 
 /*
  * Frees all RX buffers on the freelist Q. The caller must make sure that
  * the SGE is turned off before calling this function.
  */
-static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *Q)
+static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
 {
-       unsigned int cidx = Q->cidx, credits = Q->credits;
+       unsigned int cidx = q->cidx;
 
-       while (credits--) {
-               struct freelQ_ce *ce = &Q->centries[cidx];
+       while (q->credits--) {
+               struct freelQ_ce *ce = &q->centries[cidx];
 
                pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
                                 pci_unmap_len(ce, dma_len),
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(ce->skb);
                ce->skb = NULL;
-               if (++cidx == Q->entries_n)
+               if (++cidx == q->size)
                        cidx = 0;
        }
 }
@@ -380,29 +294,29 @@ static void free_rx_resources(struct sge *sge)
        unsigned int size, i;
 
        if (sge->respQ.entries) {
-               size = sizeof(struct respQ_e) * sge->respQ.entries_n;
+               size = sizeof(struct respQ_e) * sge->respQ.size;
                pci_free_consistent(pdev, size, sge->respQ.entries,
                                    sge->respQ.dma_addr);
        }
 
        for (i = 0; i < SGE_FREELQ_N; i++) {
-               struct freelQ *Q = &sge->freelQ[i];
+               struct freelQ *q = &sge->freelQ[i];
 
-               if (Q->centries) {
-                       free_freelQ_buffers(pdev, Q);
-                       kfree(Q->centries);
+               if (q->centries) {
+                       free_freelQ_buffers(pdev, q);
+                       kfree(q->centries);
                }
-               if (Q->entries) {
-                       size = sizeof(struct freelQ_e) * Q->entries_n;
-                       pci_free_consistent(pdev, size, Q->entries,
-                                           Q->dma_addr);
+               if (q->entries) {
+                       size = sizeof(struct freelQ_e) * q->size;
+                       pci_free_consistent(pdev, size, q->entries,
+                                           q->dma_addr);
                }
        }
 }
 
 /*
  * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
- * response Q.
+ * response queue.
  */
 static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
 {
@@ -410,21 +324,22 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
        unsigned int size, i;
 
        for (i = 0; i < SGE_FREELQ_N; i++) {
-               struct freelQ *Q = &sge->freelQ[i];
-
-               Q->genbit = 1;
-               Q->entries_n = p->freelQ_size[i];
-               Q->dma_offset = SGE_RX_OFFSET - sge->rx_pkt_pad;
-               size = sizeof(struct freelQ_e) * Q->entries_n;
-               Q->entries = (struct freelQ_e *)
-                             pci_alloc_consistent(pdev, size, &Q->dma_addr);
-               if (!Q->entries)
+               struct freelQ *q = &sge->freelQ[i];
+
+               q->genbit = 1;
+               q->size = p->freelQ_size[i];
+               q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
+               size = sizeof(struct freelQ_e) * q->size;
+               q->entries = (struct freelQ_e *)
+                             pci_alloc_consistent(pdev, size, &q->dma_addr);
+               if (!q->entries)
                        goto err_no_mem;
-               memset(Q->entries, 0, size);
-               Q->centries = kcalloc(Q->entries_n, sizeof(struct freelQ_ce),
-                                     GFP_KERNEL);
-               if (!Q->centries)
+               memset(q->entries, 0, size);
+               size = sizeof(struct freelQ_ce) * q->size;
+               q->centries = kmalloc(size, GFP_KERNEL);
+               if (!q->centries)
                        goto err_no_mem;
+               memset(q->centries, 0, size);
        }
 
        /*
@@ -440,10 +355,17 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
        sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
+       /*
+        * Setup which skb recycle Q should be used when recycling buffers from
+        * each free list.
+        */
+       sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
+       sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
+
        sge->respQ.genbit = 1;
-       sge->respQ.entries_n = SGE_RESPQ_E_N;
-       sge->respQ.credits = SGE_RESPQ_E_N;
-       size = sizeof(struct respQ_e) * sge->respQ.entries_n;
+       sge->respQ.size = SGE_RESPQ_E_N;
+       sge->respQ.credits = 0;
+       size = sizeof(struct respQ_e) * sge->respQ.size;
        sge->respQ.entries = (struct respQ_e *)
                pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
        if (!sge->respQ.entries)
@@ -457,48 +379,37 @@ err_no_mem:
 }
 
 /*
- * Frees 'credits_pend' TX buffers and returns the credits to Q->credits.
- *
- * The adaptive algorithm receives the total size of the buffers freed
- * accumulated in @*totpayload. No initialization of this argument here.
- *
+ * Reclaims n TX descriptors and frees the buffers associated with them.
  */
-static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *Q,
-                             unsigned int credits_pend, unsigned int *totpayload)
+static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
 {
+       struct cmdQ_ce *ce;
        struct pci_dev *pdev = sge->adapter->pdev;
-       struct sk_buff *skb;
-       struct cmdQ_ce *ce, *cq = Q->centries;
-       unsigned int entries_n = Q->entries_n, cidx = Q->cidx,
-                    i = credits_pend;
+       unsigned int cidx = q->cidx;
 
-
-       ce = &cq[cidx];
-       while (i--) {
-               if (ce->single)
+       q->in_use -= n;
+       ce = &q->centries[cidx];
+       while (n--) {
+               if (q->sop)
                        pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-                                        pci_unmap_len(ce, dma_len),
+                                        pci_unmap_len(ce, dma_len),
                                         PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
-                                      pci_unmap_len(ce, dma_len),
+                                      pci_unmap_len(ce, dma_len),
                                       PCI_DMA_TODEVICE);
-               if (totpayload)
-                       *totpayload += pci_unmap_len(ce, dma_len);
-
-               skb = ce->skb;
-               if (skb)
-                       dev_kfree_skb_irq(skb);
-
+               q->sop = 0;
+               if (ce->skb) {
+                       dev_kfree_skb(ce->skb);
+                       q->sop = 1;
+               }
                ce++;
-               if (++cidx == entries_n) {
+               if (++cidx == q->size) {
                        cidx = 0;
-                       ce = cq;
+                       ce = q->centries;
                }
        }
-
-       Q->cidx = cidx;
-       atomic_add(credits_pend, &Q->credits);
+       q->cidx = cidx;
 }
 
 /*
@@ -512,20 +423,17 @@ static void free_tx_resources(struct sge *sge)
        unsigned int size, i;
 
        for (i = 0; i < SGE_CMDQ_N; i++) {
-               struct cmdQ *Q = &sge->cmdQ[i];
-
-               if (Q->centries) {
-                       unsigned int pending = Q->entries_n -
-                                              atomic_read(&Q->credits);
+               struct cmdQ *q = &sge->cmdQ[i];
 
-                       if (pending)
-                               free_cmdQ_buffers(sge, Q, pending, NULL);
-                       kfree(Q->centries);
+               if (q->centries) {
+                       if (q->in_use)
+                               free_cmdQ_buffers(sge, q, q->in_use);
+                       kfree(q->centries);
                }
-               if (Q->entries) {
-                       size = sizeof(struct cmdQ_e) * Q->entries_n;
-                       pci_free_consistent(pdev, size, Q->entries,
-                                           Q->dma_addr);
+               if (q->entries) {
+                       size = sizeof(struct cmdQ_e) * q->size;
+                       pci_free_consistent(pdev, size, q->entries,
+                                           q->dma_addr);
                }
        }
 }
@@ -539,25 +447,38 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
        unsigned int size, i;
 
        for (i = 0; i < SGE_CMDQ_N; i++) {
-               struct cmdQ *Q = &sge->cmdQ[i];
-
-               Q->genbit = 1;
-               Q->entries_n = p->cmdQ_size[i];
-               atomic_set(&Q->credits, Q->entries_n);
-               atomic_set(&Q->asleep, 1);
-               spin_lock_init(&Q->Qlock);
-               size = sizeof(struct cmdQ_e) * Q->entries_n;
-               Q->entries = (struct cmdQ_e *)
-                             pci_alloc_consistent(pdev, size, &Q->dma_addr);
-               if (!Q->entries)
+               struct cmdQ *q = &sge->cmdQ[i];
+
+               q->genbit = 1;
+               q->sop = 1;
+               q->size = p->cmdQ_size[i];
+               q->in_use = 0;
+               q->status = 0;
+               q->processed = q->cleaned = 0;
+               q->stop_thres = 0;
+               spin_lock_init(&q->lock);
+               size = sizeof(struct cmdQ_e) * q->size;
+               q->entries = (struct cmdQ_e *)
+                             pci_alloc_consistent(pdev, size, &q->dma_addr);
+               if (!q->entries)
                        goto err_no_mem;
-               memset(Q->entries, 0, size);
-               Q->centries = kcalloc(Q->entries_n, sizeof(struct cmdQ_ce),
-                                     GFP_KERNEL);
-               if (!Q->centries)
+               memset(q->entries, 0, size);
+               size = sizeof(struct cmdQ_ce) * q->size;
+               q->centries = kmalloc(size, GFP_KERNEL);
+               if (!q->centries)
                        goto err_no_mem;
+               memset(q->centries, 0, size);
        }
 
+       /*
+        * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
+        * only.  For queue 0 set the stop threshold so we can handle one more
+        * packet from each port, plus reserve an additional 24 entries for
+        * Ethernet packets only.  Queue 1 never suspends nor do we reserve
+        * space for Ethernet packets.
+        */
+       sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
+               (MAX_SKB_FRAGS + 1);
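	/*
	 * Worked example (MAX_SKB_FRAGS is configuration dependent): with
	 * two ports and MAX_SKB_FRAGS == 18, stop_thres becomes
	 * 2 * (18 + 1) = 38, i.e. the queue is suspended while fewer than
	 * 38 of cmdQ0's 1024 descriptors remain free, enough to accept one
	 * more maximally fragmented packet from each port.
	 */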
        return 0;
 
 err_no_mem:
@@ -569,9 +490,9 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
                                     u32 size, int base_reg_lo,
                                     int base_reg_hi, int size_reg)
 {
-       t1_write_reg_4(adapter, base_reg_lo, (u32)addr);
-       t1_write_reg_4(adapter, base_reg_hi, addr >> 32);
-       t1_write_reg_4(adapter, size_reg, size);
+       writel((u32)addr, adapter->regs + base_reg_lo);
+       writel(addr >> 32, adapter->regs + base_reg_hi);
+       writel(size, adapter->regs + size_reg);
 }
 
 /*
@@ -585,29 +506,11 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
        if (on_off)
                sge->sge_control |= F_VLAN_XTRACT;
        if (adapter->open_device_map) {
-               t1_write_reg_4(adapter, A_SG_CONTROL, sge->sge_control);
-               t1_read_reg_4(adapter, A_SG_CONTROL);   /* flush */
+               writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
+               readl(adapter->regs + A_SG_CONTROL); /* flush */
        }
 }
 
-/*
- * Sets the interrupt latency timer when the adaptive Rx coalescing
- * is turned off. Do nothing when it is turned on again.
- *
- * This routine relies on the fact that the caller has already set
- * the adaptive policy in adapter->sge_params before calling it.
-*/
-int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
-{
-       if (!p->coalesce_enable) {
-               u32 newTimer = p->rx_coalesce_usecs *
-                       (board_info(sge->adapter)->clock_core / 1000000);
-
-               t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, newTimer);
-       }
-       return 0;
-}
-
 /*
  * Programs the various SGE registers. However, the engine is not yet enabled,
  * but sge->sge_control is setup and ready to go.
@@ -615,67 +518,40 @@ int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
 static void configure_sge(struct sge *sge, struct sge_params *p)
 {
        struct adapter *ap = sge->adapter;
-       int i;
-
-       t1_write_reg_4(ap, A_SG_CONTROL, 0);
-       setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].entries_n,
+       
+       writel(0, ap->regs + A_SG_CONTROL);
+       setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
                          A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
-       setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].entries_n,
+       setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
                          A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
        setup_ring_params(ap, sge->freelQ[0].dma_addr,
-                         sge->freelQ[0].entries_n, A_SG_FL0BASELWR,
+                         sge->freelQ[0].size, A_SG_FL0BASELWR,
                          A_SG_FL0BASEUPR, A_SG_FL0SIZE);
        setup_ring_params(ap, sge->freelQ[1].dma_addr,
-                         sge->freelQ[1].entries_n, A_SG_FL1BASELWR,
+                         sge->freelQ[1].size, A_SG_FL1BASELWR,
                          A_SG_FL1BASEUPR, A_SG_FL1SIZE);
 
        /* The threshold comparison uses <. */
-       t1_write_reg_4(ap, A_SG_FLTHRESHOLD, SGE_RX_SM_BUF_SIZE + 1);
+       writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
 
-       setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.entries_n,
-                       A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
-       t1_write_reg_4(ap, A_SG_RSPQUEUECREDIT, (u32)sge->respQ.entries_n);
+       setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
+                         A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
+       writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
 
        sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
                F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
                V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
+               F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
                V_RX_PKT_OFFSET(sge->rx_pkt_pad);
 
 #if defined(__BIG_ENDIAN_BITFIELD)
        sge->sge_control |= F_ENABLE_BIG_ENDIAN;
 #endif
 
-       /*
-        * Initialize the SGE Interrupt Timer arrray:
-        * intrtimer[0]       = (SGE_INTRTIMER0) usec
-        * intrtimer[0<i<5]   = (SGE_INTRTIMER0 + i*2) usec
-        * intrtimer[4<i<10]  = ((i - 3) * 6) usec
-        * intrtimer[10]      = (SGE_INTRTIMER1) usec
-        *
-        */
-       sge->intrtimer[0] = board_info(sge->adapter)->clock_core / 1000000;
-       for (i = 1; i < SGE_INTR_LATBUCKETS; ++i) {
-               sge->intrtimer[i] = SGE_INTRTIMER0 + (2 * i);
-               sge->intrtimer[i] *= sge->intrtimer[0];
-       }
-       for (i = SGE_INTR_LATBUCKETS; i < SGE_INTR_MAXBUCKETS - 1; ++i) {
-               sge->intrtimer[i] = (i - 3) * 6;
-               sge->intrtimer[i] *= sge->intrtimer[0];
-       }
-       sge->intrtimer[SGE_INTR_MAXBUCKETS - 1] =
-         sge->intrtimer[0] * SGE_INTRTIMER1;
-       /* Initialize resource timer */
-       sge->intrtimer_nres = sge->intrtimer[0] * SGE_INTRTIMER_NRES;
-       /* Finally finish initialization of intrtimer[0] */
-       sge->intrtimer[0] *= SGE_INTRTIMER0;
-       /* Initialize for a throughput oriented workload */
-       sge->currIndex = SGE_INTR_MAXBUCKETS - 1;
-
-       if (p->coalesce_enable)
-               t1_write_reg_4(ap, A_SG_INTRTIMER,
-                              sge->intrtimer[sge->currIndex]);
-       else
-               t1_sge_set_coalesce_params(sge, p);
+       /* Initialize no-resource timer */
+       sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
+
+       t1_sge_set_coalesce_params(sge, p);
 }
 
 /*
@@ -684,31 +560,8 @@ static void configure_sge(struct sge *sge, struct sge_params *p)
 static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
 {
        return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
-               sizeof(struct cpl_rx_data) - SGE_RX_OFFSET + sge->rx_pkt_pad;
-}
-
-/*
- * Allocates both RX and TX resources and configures the SGE. However,
- * the hardware is not enabled yet.
- */
-int t1_sge_configure(struct sge *sge, struct sge_params *p)
-{
-       if (alloc_rx_resources(sge, p))
-               return -ENOMEM;
-       if (alloc_tx_resources(sge, p)) {
-               free_rx_resources(sge);
-               return -ENOMEM;
-       }
-       configure_sge(sge, p);
-
-       /*
-        * Now that we have sized the free lists calculate the payload
-        * capacity of the large buffers.  Other parts of the driver use
-        * this to set the max offload coalescing size so that RX packets
-        * do not overflow our large buffers.
-        */
-       p->large_buf_capacity = jumbo_payload_capacity(sge);
-       return 0;
+               sge->freelQ[sge->jumbo_fl].dma_offset -
+               sizeof(struct cpl_rx_data);
 }
 
 /*
@@ -716,8 +569,9 @@ int t1_sge_configure(struct sge *sge, struct sge_params *p)
  */
 void t1_sge_destroy(struct sge *sge)
 {
-       if (sge->pskb)
-               dev_kfree_skb(sge->pskb);
+       if (sge->espibug_skb)
+               kfree_skb(sge->espibug_skb);
+
        free_tx_resources(sge);
        free_rx_resources(sge);
        kfree(sge);
@@ -735,75 +589,75 @@ void t1_sge_destroy(struct sge *sge)
  * we specify a RX_OFFSET in order to make sure that the IP header is 4B
  * aligned.
  */
-static void refill_free_list(struct sge *sge, struct freelQ *Q)
+static void refill_free_list(struct sge *sge, struct freelQ *q)
 {
        struct pci_dev *pdev = sge->adapter->pdev;
-       struct freelQ_ce *ce = &Q->centries[Q->pidx];
-       struct freelQ_e *e = &Q->entries[Q->pidx];
-       unsigned int dma_len = Q->rx_buffer_size - Q->dma_offset;
+       struct freelQ_ce *ce = &q->centries[q->pidx];
+       struct freelQ_e *e = &q->entries[q->pidx];
+       unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
 
 
-       while (Q->credits < Q->entries_n) {
-               if (e->GenerationBit != Q->genbit) {
-                       struct sk_buff *skb;
-                       dma_addr_t mapping;
+       while (q->credits < q->size) {
+               struct sk_buff *skb;
+               dma_addr_t mapping;
 
-                       skb = alloc_skb(Q->rx_buffer_size, GFP_ATOMIC);
-                       if (!skb)
-                               break;
-                       if (Q->dma_offset)
-                               skb_reserve(skb, Q->dma_offset);
-                       mapping = pci_map_single(pdev, skb->data, dma_len,
-                                                PCI_DMA_FROMDEVICE);
-                       ce->skb = skb;
-                       pci_unmap_addr_set(ce, dma_addr, mapping);
-                       pci_unmap_len_set(ce, dma_len, dma_len);
-                       e->AddrLow = (u32)mapping;
-                       e->AddrHigh = (u64)mapping >> 32;
-                       e->BufferLength = dma_len;
-                       e->GenerationBit = e->GenerationBit2 = Q->genbit;
-               }
+               skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+               if (!skb)
+                       break;
+
+               skb_reserve(skb, q->dma_offset);
+               mapping = pci_map_single(pdev, skb->data, dma_len,
+                                        PCI_DMA_FROMDEVICE);
+               ce->skb = skb;
+               pci_unmap_addr_set(ce, dma_addr, mapping);
+               pci_unmap_len_set(ce, dma_len, dma_len);
+               e->addr_lo = (u32)mapping;
+               e->addr_hi = (u64)mapping >> 32;
+               e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
+               wmb();
+               e->gen2 = V_CMD_GEN2(q->genbit);
 
                e++;
                ce++;
-               if (++Q->pidx == Q->entries_n) {
-                       Q->pidx = 0;
-                       Q->genbit ^= 1;
-                       ce = Q->centries;
-                       e = Q->entries;
+               if (++q->pidx == q->size) {
+                       q->pidx = 0;
+                       q->genbit ^= 1;
+                       ce = q->centries;
+                       e = q->entries;
                }
-               Q->credits++;
+               q->credits++;
        }
 
 }
 
 /*
- * Calls refill_free_list for both freelist Qs. If we cannot
- * fill at least 1/4 of both Qs, we go into 'few interrupt mode' in order
- * to give the system time to free up resources.
+ * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
+ * of both rings, we go into 'few interrupt mode' in order to give the system
+ * time to free up resources.
  */
 static void freelQs_empty(struct sge *sge)
 {
-       u32 irq_reg = t1_read_reg_4(sge->adapter, A_SG_INT_ENABLE);
+       struct adapter *adapter = sge->adapter;
+       u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
        u32 irqholdoff_reg;
 
        refill_free_list(sge, &sge->freelQ[0]);
        refill_free_list(sge, &sge->freelQ[1]);
 
-       if (sge->freelQ[0].credits > (sge->freelQ[0].entries_n >> 2) &&
-           sge->freelQ[1].credits > (sge->freelQ[1].entries_n >> 2)) {
+       if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
+           sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
                irq_reg |= F_FL_EXHAUSTED;
-               irqholdoff_reg = sge->intrtimer[sge->currIndex];
+               irqholdoff_reg = sge->fixed_intrtimer;
        } else {
                /* Clear the F_FL_EXHAUSTED interrupts for now */
                irq_reg &= ~F_FL_EXHAUSTED;
                irqholdoff_reg = sge->intrtimer_nres;
        }
-       t1_write_reg_4(sge->adapter, A_SG_INTRTIMER, irqholdoff_reg);
-       t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, irq_reg);
+       writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
+       writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
 
        /* We reenable the Qs to force a freelist GTS interrupt later */
-       doorbell_pio(sge, F_FL0_ENABLE | F_FL1_ENABLE);
+       doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
 }
 
 #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
@@ -816,10 +670,10 @@ static void freelQs_empty(struct sge *sge)
  */
 void t1_sge_intr_disable(struct sge *sge)
 {
-       u32 val = t1_read_reg_4(sge->adapter, A_PL_ENABLE);
+       u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
 
-       t1_write_reg_4(sge->adapter, A_PL_ENABLE, val & ~SGE_PL_INTR_MASK);
-       t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, 0);
+       writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+       writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
 }
 
 /*
@@ -828,12 +682,12 @@ void t1_sge_intr_disable(struct sge *sge)
 void t1_sge_intr_enable(struct sge *sge)
 {
        u32 en = SGE_INT_ENABLE;
-       u32 val = t1_read_reg_4(sge->adapter, A_PL_ENABLE);
+       u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
 
        if (sge->adapter->flags & TSO_CAPABLE)
                en &= ~F_PACKET_TOO_BIG;
-       t1_write_reg_4(sge->adapter, A_SG_INT_ENABLE, en);
-       t1_write_reg_4(sge->adapter, A_PL_ENABLE, val | SGE_PL_INTR_MASK);
+       writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
+       writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
 }
 
 /*
@@ -841,8 +695,8 @@ void t1_sge_intr_enable(struct sge *sge)
  */
 void t1_sge_intr_clear(struct sge *sge)
 {
-       t1_write_reg_4(sge->adapter, A_PL_CAUSE, SGE_PL_INTR_MASK);
-       t1_write_reg_4(sge->adapter, A_SG_INT_CAUSE, 0xffffffff);
+       writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
+       writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
 }
 
 /*
@@ -851,464 +705,674 @@ void t1_sge_intr_clear(struct sge *sge)
 int t1_sge_intr_error_handler(struct sge *sge)
 {
        struct adapter *adapter = sge->adapter;
-       u32 cause = t1_read_reg_4(adapter, A_SG_INT_CAUSE);
+       u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
 
        if (adapter->flags & TSO_CAPABLE)
                cause &= ~F_PACKET_TOO_BIG;
        if (cause & F_RESPQ_EXHAUSTED)
-               sge->intr_cnt.respQ_empty++;
+               sge->stats.respQ_empty++;
        if (cause & F_RESPQ_OVERFLOW) {
-               sge->intr_cnt.respQ_overflow++;
+               sge->stats.respQ_overflow++;
                CH_ALERT("%s: SGE response queue overflow\n",
                         adapter->name);
        }
        if (cause & F_FL_EXHAUSTED) {
-               sge->intr_cnt.freelistQ_empty++;
+               sge->stats.freelistQ_empty++;
                freelQs_empty(sge);
        }
        if (cause & F_PACKET_TOO_BIG) {
-               sge->intr_cnt.pkt_too_big++;
+               sge->stats.pkt_too_big++;
                CH_ALERT("%s: SGE max packet size exceeded\n",
                         adapter->name);
        }
        if (cause & F_PACKET_MISMATCH) {
-               sge->intr_cnt.pkt_mismatch++;
+               sge->stats.pkt_mismatch++;
                CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
        }
        if (cause & SGE_INT_FATAL)
                t1_fatal_err(adapter);
 
-       t1_write_reg_4(adapter, A_SG_INT_CAUSE, cause);
+       writel(cause, adapter->regs + A_SG_INT_CAUSE);
        return 0;
 }
 
-/*
- * The following code is copied from 2.6, where the skb_pull is doing the
- * right thing and only pulls ETH_HLEN.
+const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
+{
+       return &sge->stats;
+}
+
+const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
+{
+       return &sge->port_stats[port];
+}
+
+/**
+ *     recycle_fl_buf - recycle a free list buffer
+ *     @fl: the free list
+ *     @idx: index of buffer to recycle
  *
- *     Determine the packet's protocol ID. The rule here is that we
- *     assume 802.3 if the type field is short enough to be a length.
- *     This is normal practice and works for any 'now in use' protocol.
+ *     Recycles the specified buffer on the given free list by adding it at
+ *     the next available slot on the list.
  */
-static unsigned short sge_eth_type_trans(struct sk_buff *skb,
-                                        struct net_device *dev)
+static void recycle_fl_buf(struct freelQ *fl, int idx)
 {
-       struct ethhdr *eth;
-       unsigned char *rawp;
+       struct freelQ_e *from = &fl->entries[idx];
+       struct freelQ_e *to = &fl->entries[fl->pidx];
 
-       skb->mac.raw = skb->data;
-       skb_pull(skb, ETH_HLEN);
-       eth = (struct ethhdr *)skb->mac.raw;
+       fl->centries[fl->pidx] = fl->centries[idx];
+       to->addr_lo = from->addr_lo;
+       to->addr_hi = from->addr_hi;
+       to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
+       wmb();
+       to->gen2 = V_CMD_GEN2(fl->genbit);
+       fl->credits++;
 
-       if (*eth->h_dest&1) {
-               if(memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
-                       skb->pkt_type = PACKET_BROADCAST;
-               else
-                       skb->pkt_type = PACKET_MULTICAST;
+       if (++fl->pidx == fl->size) {
+               fl->pidx = 0;
+               fl->genbit ^= 1;
        }
+}
 
-       /*
-        *      This ALLMULTI check should be redundant by 1.4
-        *      so don't forget to remove it.
-        *
-        *      Seems, you forgot to remove it. All silly devices
-        *      seems to set IFF_PROMISC.
-        */
+/**
+ *     get_packet - return the next ingress packet buffer
+ *     @pdev: the PCI device that received the packet
+ *     @fl: the SGE free list holding the packet
+ *     @len: the actual packet length, excluding any SGE padding
+ *     @dma_pad: padding at beginning of buffer left by SGE DMA
+ *     @skb_pad: padding to be used if the packet is copied
+ *     @copy_thres: length threshold under which a packet should be copied
+ *     @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *     Get the next packet from a free list and complete setup of the
+ *     sk_buff.  If the packet is small we make a copy and recycle the
+ *     original buffer, otherwise we use the original buffer itself.  If a
+ *     positive drop threshold is supplied packets are dropped and their
+ *     buffers recycled if (a) the number of remaining buffers is under the
+ *     threshold and the packet is too big to copy, or (b) the packet should
+ *     be copied but there is no memory for the copy.
+ */
+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+                                        struct freelQ *fl, unsigned int len,
+                                        int dma_pad, int skb_pad,
+                                        unsigned int copy_thres,
+                                        unsigned int drop_thres)
+{
+       struct sk_buff *skb;
+       struct freelQ_ce *ce = &fl->centries[fl->cidx];
+
+       if (len < copy_thres) {
+               skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
+               if (likely(skb != NULL)) {
+                       skb_reserve(skb, skb_pad);
+                       skb_put(skb, len);
+                       pci_dma_sync_single_for_cpu(pdev,
+                                           pci_unmap_addr(ce, dma_addr),
+                                           pci_unmap_len(ce, dma_len),
+                                           PCI_DMA_FROMDEVICE);
+                       memcpy(skb->data, ce->skb->data + dma_pad, len);
+                       pci_dma_sync_single_for_device(pdev,
+                                           pci_unmap_addr(ce, dma_addr),
+                                           pci_unmap_len(ce, dma_len),
+                                           PCI_DMA_FROMDEVICE);
+               } else if (!drop_thres)
+                       goto use_orig_buf;
 
-       else if (1 /*dev->flags&IFF_PROMISC*/)
-       {
-               if(memcmp(eth->h_dest,dev->dev_addr, ETH_ALEN))
-                       skb->pkt_type=PACKET_OTHERHOST;
+               recycle_fl_buf(fl, fl->cidx);
+               return skb;
        }
 
-       if (ntohs(eth->h_proto) >= 1536)
-               return eth->h_proto;
-
-       rawp = skb->data;
+       if (fl->credits < drop_thres) {
+               recycle_fl_buf(fl, fl->cidx);
+               return NULL;
+       }
 
-       /*
-        * This is a magic hack to spot IPX packets. Older Novell breaks
-        * the protocol design and runs IPX over 802.3 without an 802.2 LLC
-        * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
-        * won't work for fault tolerant netware but does for the rest.
-        */
-       if (*(unsigned short *)rawp == 0xFFFF)
-               return htons(ETH_P_802_3);
+use_orig_buf:
+       pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+                        pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+       skb = ce->skb;
+       skb_reserve(skb, dma_pad);
+       skb_put(skb, len);
+       return skb;
+}
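/*
 * Example of the copy-vs-recycle trade-off above, using the defaults in this
 * file (SGE_RX_COPY_THRES == 256 and the 2-byte skb_pad passed by sge_rx):
 * a 128-byte frame is copied into a freshly allocated 130-byte skb and its
 * original DMA buffer is recycled back onto the free list, whereas a
 * 1400-byte frame is passed to the stack in the original buffer and a
 * replacement buffer must later be allocated by refill_free_list().
 */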
 
-       /*
-        * Real 802.2 LLC
-        */
-       return htons(ETH_P_802_2);
+/**
+ *     unexpected_offload - handle an unexpected offload packet
+ *     @adapter: the adapter
+ *     @fl: the free list that received the packet
+ *
+ *     Called when we receive an unexpected offload packet (e.g., the TOE
+ *     function is disabled or the card is a NIC).  Prints a message and
+ *     recycles the buffer.
+ */
+static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
+{
+       struct freelQ_ce *ce = &fl->centries[fl->cidx];
+       struct sk_buff *skb = ce->skb;
+
+       pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
+                           pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+       CH_ERR("%s: unexpected offload packet, cmd %u\n",
+              adapter->name, *skb->data);
+       recycle_fl_buf(fl, fl->cidx);
 }
 
 /*
- * Prepare the received buffer and pass it up the stack. If it is small enough
- * and allocation doesn't fail, we use a new sk_buff and copy the content.
+ * Write the command descriptors to transmit the given skb starting at
+ * descriptor pidx with the given generation.
  */
-static unsigned int t1_sge_rx(struct sge *sge, struct freelQ *Q,
-                             unsigned int len, unsigned int offload)
+static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
+                                 unsigned int pidx, unsigned int gen,
+                                 struct cmdQ *q)
 {
-       struct sk_buff *skb;
-       struct adapter *adapter = sge->adapter;
-       struct freelQ_ce *ce = &Q->centries[Q->cidx];
+       dma_addr_t mapping;
+       struct cmdQ_e *e, *e1;
+       struct cmdQ_ce *ce;
+       unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
+
+       mapping = pci_map_single(adapter->pdev, skb->data,
+                                skb->len - skb->data_len, PCI_DMA_TODEVICE);
+       ce = &q->centries[pidx];
+       ce->skb = NULL;
+       pci_unmap_addr_set(ce, dma_addr, mapping);
+       pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
 
-       if (len <= SGE_RX_COPY_THRESHOLD &&
-           (skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC))) {
-               struct freelQ_e *e;
-               char *src = ce->skb->data;
+       flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
+               V_CMD_GEN2(gen);
+       e = &q->entries[pidx];
+       e->addr_lo = (u32)mapping;
+       e->addr_hi = (u64)mapping >> 32;
+       e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
+       for (e1 = e, i = 0; nfrags--; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               pci_dma_sync_single_for_cpu(adapter->pdev,
-                                           pci_unmap_addr(ce, dma_addr),
-                                           pci_unmap_len(ce, dma_len),
-                                           PCI_DMA_FROMDEVICE);
-               if (!offload) {
-                       skb_reserve(skb, NET_IP_ALIGN);
-                       src += sge->rx_pkt_pad;
+               ce++;
+               e1++;
+               if (++pidx == q->size) {
+                       pidx = 0;
+                       gen ^= 1;
+                       ce = q->centries;
+                       e1 = q->entries;
                }
-               memcpy(skb->data, src, len);
 
-               /* Reuse the entry. */
-               e = &Q->entries[Q->cidx];
-               e->GenerationBit  ^= 1;
-               e->GenerationBit2 ^= 1;
-       } else {
-               pci_unmap_single(adapter->pdev, pci_unmap_addr(ce, dma_addr),
-                                pci_unmap_len(ce, dma_len),
-                                PCI_DMA_FROMDEVICE);
-               skb = ce->skb;
-               if (!offload && sge->rx_pkt_pad)
-                       __skb_pull(skb, sge->rx_pkt_pad);
+               mapping = pci_map_page(adapter->pdev, frag->page,
+                                      frag->page_offset, frag->size,
+                                      PCI_DMA_TODEVICE);
+               ce->skb = NULL;
+               pci_unmap_addr_set(ce, dma_addr, mapping);
+               pci_unmap_len_set(ce, dma_len, frag->size);
+
+               e1->addr_lo = (u32)mapping;
+               e1->addr_hi = (u64)mapping >> 32;
+               e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
+               e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
+                           V_CMD_GEN2(gen);
        }
 
-       skb_put(skb, len);
+       ce->skb = skb;
+       wmb();
+       e->flags = flags;
+}
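/*
 * Note on the ordering above: the flags word of the first (SOP) descriptor
 * carries its GEN2 valid bit and is written only after the wmb(), once all
 * other descriptors for the packet are in memory, so the hardware can never
 * fetch a partially written command.
 */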
 
+/*
+ * Clean up completed Tx buffers.
+ */
+static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
+{
+       unsigned int reclaim = q->processed - q->cleaned;
 
-       if (unlikely(offload)) {
-               {
-                       printk(KERN_ERR
-                              "%s: unexpected offloaded packet, cmd %u\n",
-                              adapter->name, *skb->data);
-                       dev_kfree_skb_any(skb);
-               }
-       } else {
-               struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)skb->data;
-
-               skb_pull(skb, sizeof(*p));
-               skb->dev = adapter->port[p->iff].dev;
-               skb->dev->last_rx = jiffies;
-               skb->protocol = sge_eth_type_trans(skb, skb->dev);
-               if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
-                   skb->protocol == htons(ETH_P_IP) &&
-                   (skb->data[9] == IPPROTO_TCP ||
-                    skb->data[9] == IPPROTO_UDP))
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-               else
-                       skb->ip_summed = CHECKSUM_NONE;
-               if (adapter->vlan_grp && p->vlan_valid)
-                       vlan_hwaccel_rx(skb, adapter->vlan_grp,
-                                       ntohs(p->vlan));
-               else
-                       netif_rx(skb);
+       if (reclaim) {
+               free_cmdQ_buffers(sge, q, reclaim);
+               q->cleaned += reclaim;
        }
+}
 
-       if (++Q->cidx == Q->entries_n)
-               Q->cidx = 0;
+#ifndef SET_ETHTOOL_OPS
+# define __netif_rx_complete(dev) netif_rx_complete(dev)
+#endif
 
-       if (unlikely(--Q->credits < Q->entries_n - SGE_FREEL_REFILL_THRESH))
-               refill_free_list(sge, Q);
-       return 1;
+/*
+ * We cannot use the standard netif_rx_schedule_prep() because we have multiple
+ * ports plus the TOE all multiplexing onto a single response queue, therefore
+ * accepting new responses cannot depend on the state of any particular port.
+ * So define our own equivalent that omits the netif_running() test.
+ */
+static inline int napi_schedule_prep(struct net_device *dev)
+{
+       return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
 
-/*
- * Adaptive interrupt timer logic to keep the CPU utilization to
- * manageable levels. Basically, as the Average Packet Size (APS)
- * gets higher, the interrupt latency setting gets longer. Every
- * SGE_INTR_BUCKETSIZE (of 100B) causes a bump of 2usec to the
- * base value of SGE_INTRTIMER0. At large values of payload the
- * latency hits the ceiling value of SGE_INTRTIMER1 stored at
- * index SGE_INTR_MAXBUCKETS-1 in sge->intrtimer[].
+/**
+ *     sge_rx - process an ingress ethernet packet
+ *     @sge: the sge structure
+ *     @fl: the free list that contains the packet buffer
+ *     @len: the packet length
  *
- * sge->currIndex caches the last index to save unneeded PIOs.
+ *     Process an ingress ethernet packet and deliver it to the stack.
  */
-static inline void update_intr_timer(struct sge *sge, unsigned int avg_payload)
+static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 {
-       unsigned int newIndex;
+       struct sk_buff *skb;
+       struct cpl_rx_pkt *p;
+       struct adapter *adapter = sge->adapter;
 
-       newIndex = avg_payload / SGE_INTR_BUCKETSIZE;
-       if (newIndex > SGE_INTR_MAXBUCKETS - 1) {
-               newIndex = SGE_INTR_MAXBUCKETS - 1;
-       }
-       /* Save a PIO with this check....maybe */
-       if (newIndex != sge->currIndex) {
-               t1_write_reg_4(sge->adapter, A_SG_INTRTIMER,
-                              sge->intrtimer[newIndex]);
-               sge->currIndex = newIndex;
-               sge->adapter->params.sge.last_rx_coalesce_raw =
-                       sge->intrtimer[newIndex];
+       sge->stats.ethernet_pkts++;
+       skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
+                        sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
+                        SGE_RX_DROP_THRES);
+       if (!skb) {
+               sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
+               return 0;
        }
+
+       p = (struct cpl_rx_pkt *)skb->data;
+       skb_pull(skb, sizeof(*p));
+       skb->dev = adapter->port[p->iff].dev;
+       skb->dev->last_rx = jiffies;
+       skb->protocol = eth_type_trans(skb, skb->dev);
+       if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
+           skb->protocol == htons(ETH_P_IP) &&
+           (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+               sge->port_stats[p->iff].rx_cso_good++;
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else
+               skb->ip_summed = CHECKSUM_NONE;
+
+       if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
+               sge->port_stats[p->iff].vlan_xtract++;
+               if (adapter->params.sge.polling)
+                       vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+                                                ntohs(p->vlan));
+               else
+                       vlan_hwaccel_rx(skb, adapter->vlan_grp,
+                                       ntohs(p->vlan));
+       } else if (adapter->params.sge.polling)
+               netif_receive_skb(skb);
+       else
+               netif_rx(skb);
+       return 0;
 }
 
 /*
- * Returns true if command queue q_num has enough available descriptors that
+ * Returns true if a command queue has enough available descriptors that
  * we can resume Tx operation after temporarily disabling its packet queue.
  */
-static inline int enough_free_Tx_descs(struct sge *sge, int q_num)
+static inline int enough_free_Tx_descs(const struct cmdQ *q)
 {
-       return atomic_read(&sge->cmdQ[q_num].credits) >
-               (sge->cmdQ[q_num].entries_n >> 2);
+       unsigned int r = q->processed - q->cleaned;
+
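+       /*
+        * r counts descriptors the HW has completed but that have not yet
+        * been reclaimed; only in_use - r descriptors are really outstanding.
+        * Resume Tx once less than half of the queue is outstanding.
+        */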
+       return q->in_use - r < (q->size >> 1);
 }
 
 /*
- * Main interrupt handler, optimized assuming that we took a 'DATA'
- * interrupt.
- *
- * 1. Clear the interrupt
- * 2. Loop while we find valid descriptors and process them; accumulate
- *     information that can be processed after the loop
- * 3. Tell the SGE at which index we stopped processing descriptors
- * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
- *     outstanding TX buffers waiting, replenish RX buffers, potentially
- *     reenable upper layers if they were turned off due to lack of TX
- *     resources which are available again.
- * 5. If we took an interrupt, but no valid respQ descriptors was found we
- *     let the slow_intr_handler run and do error handling.
+ * Called when sufficient space has become available in the SGE command queues
+ * after the Tx packet schedulers have been suspended to restart the Tx path.
  */
-irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
+static void restart_tx_queues(struct sge *sge)
 {
-       struct net_device *netdev;
-       struct adapter *adapter = cookie;
-       struct sge *sge = adapter->sge;
-       struct respQ *Q = &sge->respQ;
-       unsigned int credits = Q->credits, flags = 0, ret = 0;
-       unsigned int tot_rxpayload = 0, tot_txpayload = 0, n_rx = 0, n_tx = 0;
-       unsigned int credits_pend[SGE_CMDQ_N] = { 0, 0 };
+       struct adapter *adap = sge->adapter;
 
-       struct respQ_e *e = &Q->entries[Q->cidx];
-       prefetch(e);
+       if (enough_free_Tx_descs(&sge->cmdQ[0])) {
+               int i;
 
-       t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_SGE_DATA);
+               for_each_port(adap, i) {
+                       struct net_device *nd = adap->port[i].dev;
 
+                       if (test_and_clear_bit(nd->if_port,
+                                              &sge->stopped_tx_queues) &&
+                           netif_running(nd)) {
+                               sge->stats.cmdQ_restarted[2]++;
+                               netif_wake_queue(nd);
+                       }
+               }
+       }
+}
 
-       while (e->GenerationBit == Q->genbit) {
-               if (--credits < SGE_RESPQ_REPLENISH_THRES) {
-                       u32 n = Q->entries_n - credits - 1;
+/*
+ * update_tx_info is called from the interrupt handler or NAPI poll loop to
+ * process cmdQ0 credit-return information from the SGE.
+ */
+static unsigned int update_tx_info(struct adapter *adapter, 
+                                         unsigned int flags, 
+                                         unsigned int pr0)
+{
+       struct sge *sge = adapter->sge;
+       struct cmdQ *cmdq = &sge->cmdQ[0];
 
-                       t1_write_reg_4(adapter, A_SG_RSPQUEUECREDIT, n);
-                       credits += n;
-               }
-               if (likely(e->DataValid)) {
-                       if (!e->Sop || !e->Eop)
-                               BUG();
-                       t1_sge_rx(sge, &sge->freelQ[e->FreelistQid],
-                                 e->BufferLength, e->Offload);
-                       tot_rxpayload += e->BufferLength;
-                       ++n_rx;
+       cmdq->processed += pr0;
+
+       if (flags & F_CMDQ0_ENABLE) {
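+               /*
+                * The SGE reported cmdQ0 asleep.  Clear the RUNNING flag and,
+                * if descriptors are still queued that the hardware has not
+                * processed, ring the doorbell to restart the queue.
+                */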
+               clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+       
+               if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
+                   !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
+                       set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+                       writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
                }
-               flags |= e->Qsleeping;
-               credits_pend[0] += e->Cmdq0CreditReturn;
-               credits_pend[1] += e->Cmdq1CreditReturn;
+               flags &= ~F_CMDQ0_ENABLE;
+       }
+       
+       if (unlikely(sge->stopped_tx_queues != 0))
+               restart_tx_queues(sge);
 
-#ifdef CONFIG_SMP
-               /*
-                * If enough cmdQ0 buffers have finished DMAing free them so
-                * anyone that may be waiting for their release can continue.
-                * We do this only on MP systems to allow other CPUs to proceed
-                * promptly.  UP systems can wait for the free_cmdQ_buffers()
-                * calls after this loop as the sole CPU is currently busy in
-                * this loop.
+       return flags;
+}
+
+/*
+ * Process SGE responses, up to the supplied budget.  Returns the number of
+ * responses processed.  A negative budget is effectively unlimited.
+ */
+static int process_responses(struct adapter *adapter, int budget)
+{
+       struct sge *sge = adapter->sge;
+       struct respQ *q = &sge->respQ;
+       struct respQ_e *e = &q->entries[q->cidx];
+       int budget_left = budget;
+       unsigned int flags = 0;
+       unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+       while (likely(budget_left && e->GenerationBit == q->genbit)) {
+               flags |= e->Qsleeping;
+               
+               cmdq_processed[0] += e->Cmdq0CreditReturn;
+               cmdq_processed[1] += e->Cmdq1CreditReturn;
+               
+               /* We batch updates to the TX side to avoid cacheline
+                * ping-pong of TX state information on MP where the sender
+                * might run on a different CPU than this function...
                 */
-               if (unlikely(credits_pend[0] > SGE_FREEL_REFILL_THRESH)) {
-                       free_cmdQ_buffers(sge, &sge->cmdQ[0], credits_pend[0],
-                                         &tot_txpayload);
-                       n_tx += credits_pend[0];
-                       credits_pend[0] = 0;
+               if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
+                       flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+                       cmdq_processed[0] = 0;
                }
-#endif
-               ret++;
+               if (unlikely(cmdq_processed[1] > 16)) {
+                       sge->cmdQ[1].processed += cmdq_processed[1];
+                       cmdq_processed[1] = 0;
+               }
+               if (likely(e->DataValid)) {
+                       struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+
+                       if (unlikely(!e->Sop || !e->Eop))
+                               BUG();
+                       if (unlikely(e->Offload))
+                               unexpected_offload(adapter, fl);
+                       else
+                               sge_rx(sge, fl, e->BufferLength);
+
+                       /*
+                        * Note: this depends on each packet consuming a
+                        * single free-list buffer; cf. the BUG above.
+                        */
+                       if (++fl->cidx == fl->size)
+                               fl->cidx = 0;
+                       if (unlikely(--fl->credits <
+                                    fl->size - SGE_FREEL_REFILL_THRESH))
+                               refill_free_list(sge, fl);
+               } else
+                       sge->stats.pure_rsps++;
+
                e++;
-               if (unlikely(++Q->cidx == Q->entries_n)) {
-                       Q->cidx = 0;
-                       Q->genbit ^= 1;
-                       e = Q->entries;
+               if (unlikely(++q->cidx == q->size)) {
+                       q->cidx = 0;
+                       q->genbit ^= 1;
+                       e = q->entries;
                }
+               prefetch(e);
+
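+               /* Return accumulated response queue credits to the HW in batches. */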
+               if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+                       writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+                       q->credits = 0;
+               }
+               --budget_left;
        }
 
-       Q->credits = credits;
-       t1_write_reg_4(adapter, A_SG_SLEEPING, Q->cidx);
+       flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+       sge->cmdQ[1].processed += cmdq_processed[1];
 
-       if (credits_pend[0])
-               free_cmdQ_buffers(sge, &sge->cmdQ[0], credits_pend[0], &tot_txpayload);
-       if (credits_pend[1])
-               free_cmdQ_buffers(sge, &sge->cmdQ[1], credits_pend[1], &tot_txpayload);
+       budget -= budget_left;
+       return budget;
+}
 
-       /* Do any coalescing and interrupt latency timer adjustments */
-       if (adapter->params.sge.coalesce_enable) {
-               unsigned int    avg_txpayload = 0, avg_rxpayload = 0;
+/*
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non-data-carrying) responses.  Such responses are too lightweight to justify
+ * calling a softirq when using NAPI, so we handle them specially in hard
+ * interrupt context.  The function is called with a pointer to a response,
+ * which the caller must ensure is a valid pure response.  Returns 1 if it
+ * encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
+{
+       struct sge *sge = adapter->sge;
+       struct respQ *q = &sge->respQ;
+       unsigned int flags = 0;
+       unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
 
-               n_tx += credits_pend[0] + credits_pend[1];
+       do {
+               flags |= e->Qsleeping;
 
-               /*
-                * Choose larger avg. payload size to increase
-                * throughput and reduce [CPU util., intr/s.]
-                *
-                * Throughput behavior favored in mixed-mode.
-                */
-               if (n_tx)
-                       avg_txpayload = tot_txpayload/n_tx;
-               if (n_rx)
-                       avg_rxpayload = tot_rxpayload/n_rx;
-
-               if (n_tx && avg_txpayload > avg_rxpayload){
-                       update_intr_timer(sge, avg_txpayload);
-               } else if (n_rx) {
-                       update_intr_timer(sge, avg_rxpayload);
+               cmdq_processed[0] += e->Cmdq0CreditReturn;
+               cmdq_processed[1] += e->Cmdq1CreditReturn;
+               
+               e++;
+               if (unlikely(++q->cidx == q->size)) {
+                       q->cidx = 0;
+                       q->genbit ^= 1;
+                       e = q->entries;
                }
-       }
-
-       if (flags & F_CMDQ0_ENABLE) {
-               struct cmdQ *cmdQ = &sge->cmdQ[0];
+               prefetch(e);
 
-               atomic_set(&cmdQ->asleep, 1);
-               if (atomic_read(&cmdQ->pio_pidx) != cmdQ->pidx) {
-                       doorbell_pio(sge, F_CMDQ0_ENABLE);
-                       atomic_set(&cmdQ->pio_pidx, cmdQ->pidx);
+               if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+                       writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+                       q->credits = 0;
                }
-       }
-       if (unlikely(flags & (F_FL0_ENABLE | F_FL1_ENABLE)))
-               freelQs_empty(sge);
+               sge->stats.pure_rsps++;
+       } while (e->GenerationBit == q->genbit && !e->DataValid);
 
-       netdev = adapter->port[0].dev;
-       if (unlikely(netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
-                    enough_free_Tx_descs(sge, 0) &&
-                    enough_free_Tx_descs(sge, 1))) {
-               netif_wake_queue(netdev);
-       }
-       if (unlikely(!ret))
-               ret = t1_slow_intr_handler(adapter);
+       flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+       sge->cmdQ[1].processed += cmdq_processed[1];
 
-       return IRQ_RETVAL(ret != 0);
+       return e->GenerationBit == q->genbit;
 }
 
 /*
- * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
- *
- * The code figures out how many entries the sk_buff will require in the
- * cmdQ and updates the cmdQ data structure with the state once the enqueue
- * has complete. Then, it doesn't access the global structure anymore, but
- * uses the corresponding fields on the stack. In conjuction with a spinlock
- * around that code, we can make the function reentrant without holding the
- * lock when we actually enqueue (which might be expensive, especially on
- * architectures with IO MMUs).
+ * Handler for new data events when using NAPI.  This does not need any locking
+ * or protection from interrupts as data interrupts are off at this point and
+ * other adapter interrupts do not interfere.
  */
-static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
-                      unsigned int qid)
+static int t1_poll(struct net_device *dev, int *budget)
 {
-       struct sge *sge = adapter->sge;
-       struct cmdQ *Q = &sge->cmdQ[qid];
-       struct cmdQ_e *e;
-       struct cmdQ_ce *ce;
-       dma_addr_t mapping;
-       unsigned int credits, pidx, genbit;
+       struct adapter *adapter = dev->priv;
+       int effective_budget = min(*budget, dev->quota);
 
-       unsigned int count = 1 + skb_shinfo(skb)->nr_frags;
+       int work_done = process_responses(adapter, effective_budget);
+       *budget -= work_done;
+       dev->quota -= work_done;
+
+       if (work_done >= effective_budget)
+               return 1;
+
+       __netif_rx_complete(dev);
 
        /*
-        * Coming from the timer
+        * Because we don't atomically flush the following write it is
+        * possible that in very rare cases it can reach the device in a way
+        * that races with a new response being written plus an error interrupt
+        * causing the NAPI interrupt handler below to return unhandled status
+        * to the OS.  To protect against this would require flushing the write
+        * and doing both the write and the flush with interrupts off.  Way too
+        * expensive and unjustifiable given the rarity of the race.
         */
-       if ((skb == sge->pskb)) {
-               /*
-                * Quit if any cmdQ activities
-                */
-               if (!spin_trylock(&Q->Qlock))
-                       return 0;
-               if (atomic_read(&Q->credits) != Q->entries_n) {
-                       spin_unlock(&Q->Qlock);
-                       return 0;
-               }
-       }
-       else
-               spin_lock(&Q->Qlock);
-
-       genbit = Q->genbit;
-       pidx = Q->pidx;
-       credits = atomic_read(&Q->credits);
-
-       credits -= count;
-       atomic_sub(count, &Q->credits);
-       Q->pidx += count;
-       if (Q->pidx >= Q->entries_n) {
-               Q->pidx -= Q->entries_n;
-               Q->genbit ^= 1;
-       }
+       writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+       return 0;
+}
 
-       if (unlikely(credits < (MAX_SKB_FRAGS + 1))) {
-               sge->intr_cnt.cmdQ_full[qid]++;
-               netif_stop_queue(adapter->port[0].dev);
-       }
-       spin_unlock(&Q->Qlock);
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct net_device *dev)
+{
+       return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
 
-       mapping = pci_map_single(adapter->pdev, skb->data,
-                                skb->len - skb->data_len, PCI_DMA_TODEVICE);
-       ce = &Q->centries[pidx];
-       ce->skb = NULL;
-       pci_unmap_addr_set(ce, dma_addr, mapping);
-       pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
-       ce->single = 1;
+/*
+ * NAPI version of the main interrupt handler.
+ */
+static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
+{
+       int handled;
+       struct adapter *adapter = data;
+       struct sge *sge = adapter->sge;
+       struct respQ *q = &adapter->sge->respQ;
 
-       e = &Q->entries[pidx];
-       e->Sop =  1;
-       e->DataValid = 1;
-       e->BufferLength = skb->len - skb->data_len;
-       e->AddrHigh = (u64)mapping >> 32;
-       e->AddrLow = (u32)mapping;
+       /*
+        * Clear the SGE_DATA interrupt first thing.  Normally the NAPI
+        * handler has control of the response queue and the interrupt handler
+        * can look at the queue reliably only once it knows NAPI is off.
+        * We can't wait that long to clear the SGE_DATA interrupt because we
+        * could race with t1_poll rearming the SGE interrupt, so we need to
+        * clear the interrupt speculatively and really early on.
+        */
+       writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+       spin_lock(&adapter->async_lock);
+       if (!napi_is_scheduled(sge->netdev)) {
+               struct respQ_e *e = &q->entries[q->cidx];
+
+               if (e->GenerationBit == q->genbit) {
+                       if (e->DataValid ||
+                           process_pure_responses(adapter, e)) {
+                               if (likely(napi_schedule_prep(sge->netdev)))
+                                       __netif_rx_schedule(sge->netdev);
+                               else
+                                       printk(KERN_CRIT
+                                              "NAPI schedule failure!\n");
+                       } else
+                               writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+                       handled = 1;
+                       goto unlock;
+               } else
+                       writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+       } else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
+               printk(KERN_ERR "data interrupt while NAPI running\n");
+       
+       handled = t1_slow_intr_handler(adapter);
+       if (!handled)
+               sge->stats.unhandled_irqs++;
+ unlock:
+       spin_unlock(&adapter->async_lock);
+       return IRQ_RETVAL(handled != 0);
+}
 
-       if (--count > 0) {
-               unsigned int i;
+/*
+ * Main interrupt handler, optimized assuming that we took a 'DATA'
+ * interrupt.
+ *
+ * 1. Clear the interrupt
+ * 2. Loop while we find valid descriptors and process them; accumulate
+ *      information that can be processed after the loop
+ * 3. Tell the SGE at which index we stopped processing descriptors
+ * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
+ *      outstanding TX buffers waiting, replenish RX buffers, potentially
+ *      reenable upper layers if they were turned off due to lack of TX
+ *      resources which are available again.
+ * 5. If we took an interrupt but no valid respQ descriptors were found, we
+ *      let the slow_intr_handler run and do error handling.
+ */
+static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
+{
+       int work_done;
+       struct respQ_e *e;
+       struct adapter *adapter = cookie;
+       struct respQ *Q = &adapter->sge->respQ;
 
-               e->Eop = 0;
-               wmb();
-               e->GenerationBit = e->GenerationBit2 = genbit;
+       spin_lock(&adapter->async_lock);
+       e = &Q->entries[Q->cidx];
+       prefetch(e);
 
-               for (i = 0; i < count; i++) {
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+       writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
 
-                       ce++; e++;
-                       if (++pidx == Q->entries_n) {
-                               pidx = 0;
-                               genbit ^= 1;
-                               ce = Q->centries;
-                               e = Q->entries;
-                       }
+       if (likely(e->GenerationBit == Q->genbit))
+               work_done = process_responses(adapter, -1);
+       else
+               work_done = t1_slow_intr_handler(adapter);
 
-                       mapping = pci_map_page(adapter->pdev, frag->page,
-                                              frag->page_offset,
-                                              frag->size,
-                                              PCI_DMA_TODEVICE);
-                       ce->skb = NULL;
-                       pci_unmap_addr_set(ce, dma_addr, mapping);
-                       pci_unmap_len_set(ce, dma_len, frag->size);
-                       ce->single = 0;
-
-                       e->Sop = 0;
-                       e->DataValid = 1;
-                       e->BufferLength = frag->size;
-                       e->AddrHigh = (u64)mapping >> 32;
-                       e->AddrLow = (u32)mapping;
-
-                       if (i < count - 1) {
-                               e->Eop = 0;
-                               wmb();
-                               e->GenerationBit = e->GenerationBit2 = genbit;
-                       }
+       /*
+        * The unconditional clearing of the PL_CAUSE above may have raced
+        * with DMA completion and the corresponding generation of a response
+        * to cause us to miss the resulting data interrupt.  The next write
+        * is also unconditional to recover the missed interrupt and render
+        * this race harmless.
+        */
+       writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
+
+       if (!work_done)
+               adapter->sge->stats.unhandled_irqs++;
+       spin_unlock(&adapter->async_lock);
+       return IRQ_RETVAL(work_done != 0);
+}
+
+intr_handler_t t1_select_intr_handler(adapter_t *adapter)
+{
+       return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
+}
+
+/*
+ * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
+ *
+ * The code figures out how many entries the sk_buff will require in the
+ * cmdQ and updates the cmdQ data structure with the state once the enqueue
+ * has completed. Then, it doesn't access the global structure anymore, but
+ * uses the corresponding fields on the stack. In conjunction with a spinlock
+ * around that code, we can make the function reentrant without holding the
+ * lock when we actually enqueue (which might be expensive, especially on
+ * architectures with IO MMUs).
+ *
+ * This runs with softirqs disabled.
+ */
+static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+                    unsigned int qid, struct net_device *dev)
+{
+       struct sge *sge = adapter->sge;
+       struct cmdQ *q = &sge->cmdQ[qid];
+       unsigned int credits, pidx, genbit, count;
+
+       spin_lock(&q->lock);
+       reclaim_completed_tx(sge, q);
+
+       pidx = q->pidx;
+       credits = q->size - q->in_use;
+       count = 1 + skb_shinfo(skb)->nr_frags;
+
+       {       /* Ethernet packet */
+               if (unlikely(credits < count)) {
+                       netif_stop_queue(dev);
+                       set_bit(dev->if_port, &sge->stopped_tx_queues);
+                       sge->stats.cmdQ_full[2]++;
+                       spin_unlock(&q->lock);
+                       if (!netif_queue_stopped(dev))
+                               CH_ERR("%s: Tx ring full while queue awake!\n",
+                                      adapter->name);
+                       return NETDEV_TX_BUSY;
+               }
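+               /*
+                * Proactively stop the queue when fewer than stop_thres
+                * credits would remain; it is restarted once enough
+                * descriptors have been reclaimed.
+                */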
+               if (unlikely(credits - count < q->stop_thres)) {
+                       sge->stats.cmdQ_full[2]++;
+                       netif_stop_queue(dev);
+                       set_bit(dev->if_port, &sge->stopped_tx_queues);
                }
        }
+       q->in_use += count;
+       genbit = q->genbit;
+       q->pidx += count;
+       if (q->pidx >= q->size) {
+               q->pidx -= q->size;
+               q->genbit ^= 1;
+       }
+       spin_unlock(&q->lock);
 
-       if (skb != sge->pskb)
-               ce->skb = skb;
-       e->Eop = 1;
-       wmb();
-       e->GenerationBit = e->GenerationBit2 = genbit;
+       write_tx_descs(adapter, skb, pidx, genbit, q);
 
        /*
         * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
@@ -1317,50 +1381,50 @@ static unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
         * then the interrupt handler will detect the outstanding TX packet
         * and ring the doorbell for us.
         */
-       if (qid) {
-               doorbell_pio(sge, F_CMDQ1_ENABLE);
-       } else if (atomic_read(&Q->asleep)) {
-               atomic_set(&Q->asleep, 0);
-               doorbell_pio(sge, F_CMDQ0_ENABLE);
-               atomic_set(&Q->pio_pidx, Q->pidx);
+       if (qid)
+               doorbell_pio(adapter, F_CMDQ1_ENABLE);
+       else {
+               clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+               if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+                       set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+                       writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+               }
        }
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
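+/*
+ * MK_ETH_TYPE_MSS packs the Ethernet framing type into the upper bits and the
+ * MSS into the low 14 bits of the LSO CPL field.
+ */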
 
+/*
+ *     eth_hdr_len - return the length of an Ethernet header
+ *     @data: pointer to the start of the Ethernet header
+ *
+ *     Returns the length of an Ethernet header, including optional VLAN tag.
+ */
+static inline int eth_hdr_len(const void *data)
+{
+       const struct ethhdr *e = data;
+
+       return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
 /*
  * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
  */
 int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct adapter *adapter = dev->priv;
+       struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
+       struct sge *sge = adapter->sge;
        struct cpl_tx_pkt *cpl;
-       struct ethhdr *eth;
-       size_t max_len;
-
-       /*
-        * We are using a non-standard hard_header_len and some kernel
-        * components, such as pktgen, do not handle it right.  Complain
-        * when this happens but try to fix things up.
-        */
-       if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
-               struct sk_buff *orig_skb = skb;
-
-               if (net_ratelimit())
-                       printk(KERN_ERR
-                              "%s: Tx packet has inadequate headroom\n",
-                              dev->name);
-               skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
-               dev_kfree_skb_any(orig_skb);
-               if (!skb)
-                       return -ENOMEM;
-       }
 
+#ifdef NETIF_F_TSO
        if (skb_shinfo(skb)->tso_size) {
                int eth_type;
                struct cpl_tx_pkt_lso *hdr;
 
+               st->tso++;
+
                eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
                        CPL_ETH_II : CPL_ETH_II_VLAN;
 
@@ -1373,40 +1437,72 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                                skb_shinfo(skb)->tso_size));
                hdr->len = htonl(skb->len - sizeof(*hdr));
                cpl = (struct cpl_tx_pkt *)hdr;
+               sge->stats.tx_lso_pkts++;
        } else
+#endif
        {
                /*
-                * An Ethernet packet must have at least space for
-                * the DIX Ethernet header and be no greater than
-                * the device set MTU. Otherwise trash the packet.
+                * Packets shorter than ETH_HLEN can break the MAC, so drop
+                * them early.  We may also get oversized packets because some
+                * parts of the kernel don't handle our unusual hard_header_len
+                * correctly; drop those too.
                 */
-               if (skb->len < ETH_HLEN)
-                       goto t1_start_xmit_fail2;
-               eth = (struct ethhdr *)skb->data;
-               if (eth->h_proto == htons(ETH_P_8021Q))
-                       max_len = dev->mtu + VLAN_ETH_HLEN;
-               else
-                       max_len = dev->mtu + ETH_HLEN;
-               if (skb->len > max_len)
-                       goto t1_start_xmit_fail2;
+               if (unlikely(skb->len < ETH_HLEN ||
+                            skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+
+               /*
+                * We are using a non-standard hard_header_len and some kernel
+                * components, such as pktgen, do not handle it right.
+                * Complain when this happens but try to fix things up.
+                */
+               if (unlikely(skb_headroom(skb) <
+                            dev->hard_header_len - ETH_HLEN)) {
+                       struct sk_buff *orig_skb = skb;
+
+                       if (net_ratelimit())
+                               printk(KERN_ERR "%s: inadequate headroom in "
+                                      "Tx packet\n", dev->name);
+                       skb = skb_realloc_headroom(skb, sizeof(*cpl));
+                       dev_kfree_skb_any(orig_skb);
+                       if (!skb)
+                               return NETDEV_TX_OK;
+               }
 
                if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
                    skb->ip_summed == CHECKSUM_HW &&
-                   skb->nh.iph->protocol == IPPROTO_UDP &&
-                   skb_checksum_help(skb, 0))
-                       goto t1_start_xmit_fail3;
-
+                   skb->nh.iph->protocol == IPPROTO_UDP)
+                       if (unlikely(skb_checksum_help(skb, 0))) {
+                               dev_kfree_skb_any(skb);
+                               return NETDEV_TX_OK;
+                       }
 
-               if (!adapter->sge->pskb) {
+               /* We assume this catches the gratuitous ARP.  Keep hold of it
+                * so it can be re-sent later to flush out stuck ESPI packets.
+                */
+               if (unlikely(!adapter->sge->espibug_skb)) {
                        if (skb->protocol == htons(ETH_P_ARP) &&
-                           skb->nh.arph->ar_op == htons(ARPOP_REQUEST))
-                               adapter->sge->pskb = skb;
+                           skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
+                               adapter->sge->espibug_skb = skb;
+                               /* We want to re-use this skb later. We
+                                * simply bump the reference count and it
+                                * will not be freed...
+                                */
+                               skb = skb_get(skb);
+                       }
                }
-               cpl = (struct cpl_tx_pkt *)skb_push(skb, sizeof(*cpl));
+
+               cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
                cpl->opcode = CPL_TX_PKT;
                cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
                cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
                /* the length field isn't used so don't bother setting it */
+
+               st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
+               sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
+               sge->stats.tx_reg_pkts++;
        }
        cpl->iff = dev->if_port;
 
@@ -1414,38 +1510,176 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
                cpl->vlan_valid = 1;
                cpl->vlan = htons(vlan_tx_tag_get(skb));
+               st->vlan_insert++;
        } else
 #endif
                cpl->vlan_valid = 0;
 
        dev->trans_start = jiffies;
-       return t1_sge_tx(skb, adapter, 0);
+       return t1_sge_tx(skb, adapter, 0, dev);
+}
 
-t1_start_xmit_fail3:
-       printk(KERN_INFO "%s: Unable to complete checksum\n", dev->name);
-       goto t1_start_xmit_fail1;
+/*
+ * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
+ */
+static void sge_tx_reclaim_cb(unsigned long data)
+{
+       int i;
+       struct sge *sge = (struct sge *)data;
+
+       for (i = 0; i < SGE_CMDQ_N; ++i) {
+               struct cmdQ *q = &sge->cmdQ[i];
+
+               if (!spin_trylock(&q->lock))
+                       continue;
 
-t1_start_xmit_fail2:
-       printk(KERN_INFO "%s: Invalid packet length %d, dropping\n",
-                       dev->name, skb->len);
+               reclaim_completed_tx(sge, q);
+               if (i == 0 && q->in_use)   /* flush pending credits */
+                       writel(F_CMDQ0_ENABLE,
+                               sge->adapter->regs + A_SG_DOORBELL);
 
-t1_start_xmit_fail1:
-       dev_kfree_skb_any(skb);
+               spin_unlock(&q->lock);
+       }
+       mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/*
+ * Propagate changes of the SGE coalescing parameters to the HW.
+ */
+int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
+{
+       sge->netdev->poll = t1_poll;
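+       /* Convert the microsecond coalescing setting into core clock ticks. */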
+       sge->fixed_intrtimer = p->rx_coalesce_usecs *
+               core_ticks_per_usec(sge->adapter);
+       writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
        return 0;
 }
 
-void t1_sge_set_ptimeout(adapter_t *adapter, u32 val)
+/*
+ * Allocates both RX and TX resources and configures the SGE. However,
+ * the hardware is not enabled yet.
+ */
+int t1_sge_configure(struct sge *sge, struct sge_params *p)
 {
-       struct sge *sge = adapter->sge;
+       if (alloc_rx_resources(sge, p))
+               return -ENOMEM;
+       if (alloc_tx_resources(sge, p)) {
+               free_rx_resources(sge);
+               return -ENOMEM;
+       }
+       configure_sge(sge, p);
+
+       /*
+        * Now that we have sized the free lists, calculate the payload
+        * capacity of the large buffers.  Other parts of the driver use
+        * this to set the max offload coalescing size so that RX packets
+        * do not overflow our large buffers.
+        */
+       p->large_buf_capacity = jumbo_payload_capacity(sge);
+       return 0;
+}
 
-       if (is_T2(adapter))
-               sge->ptimeout = max((u32)((HZ * val) / 1000), (u32)1);
+/*
+ * Disables the DMA engine.
+ */
+void t1_sge_stop(struct sge *sge)
+{
+       writel(0, sge->adapter->regs + A_SG_CONTROL);
+       (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+       if (is_T2(sge->adapter))
+               del_timer_sync(&sge->espibug_timer);
+       del_timer_sync(&sge->tx_reclaim_timer);
 }
 
-u32 t1_sge_get_ptimeout(adapter_t *adapter)
+/*
+ * Enables the DMA engine.
+ */
+void t1_sge_start(struct sge *sge)
 {
+       refill_free_list(sge, &sge->freelQ[0]);
+       refill_free_list(sge, &sge->freelQ[1]);
+
+       writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
+       doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+       (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+       mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+       if (is_T2(sge->adapter)) 
+               mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Callback for the T2 ESPI 'stuck packet feature' workaround.
+ */
+static void espibug_workaround(void *data)
+{
+       struct adapter *adapter = (struct adapter *)data;
        struct sge *sge = adapter->sge;
 
-       return (is_T2(adapter) ? ((sge->ptimeout * 1000) / HZ) : 0);
+       if (netif_running(adapter->port[0].dev)) {
+               struct sk_buff *skb = sge->espibug_skb;
+
+               u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
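+               /*
+                * This monitor value pattern indicates the ESPI has a stalled
+                * packet; resubmit the cached (gratuitous ARP) frame through
+                * the TX path to flush it out.
+                */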
+               if ((seop & 0xfff0fff) == 0xfff && skb) {
+                       if (!skb->cb[0]) {
+                               u8 ch_mac_addr[ETH_ALEN] =
+                                   {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+                               memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+                                   ch_mac_addr, ETH_ALEN);
+                               memcpy(skb->data + skb->len - 10, ch_mac_addr,
+                                   ETH_ALEN);
+                               skb->cb[0] = 0xff;
+                       }
+
+                       /* bump the reference count to avoid freeing of the
+                        * skb once the DMA has completed.
+                        */
+                       skb = skb_get(skb);
+                       t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+               }
+       }
+       mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
 }
 
+/*
+ * Creates a t1_sge structure and returns suggested resource parameters.
+ */
+struct sge * __devinit t1_sge_create(struct adapter *adapter,
+                                    struct sge_params *p)
+{
+       struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
+
+       if (!sge)
+               return NULL;
+       memset(sge, 0, sizeof(*sge));
+
+       sge->adapter = adapter;
+       sge->netdev = adapter->port[0].dev;
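+       /*
+        * Chip-specific settings: T1B parts do not prepend the 2-byte ingress
+        * packet pad and use free list 1 for the jumbo buffers.
+        */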
+       sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
+       sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+       init_timer(&sge->tx_reclaim_timer);
+       sge->tx_reclaim_timer.data = (unsigned long)sge;
+       sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
+
+       if (is_T2(sge->adapter)) {
+               init_timer(&sge->espibug_timer);
+               sge->espibug_timer.function = (void *)&espibug_workaround;
+               sge->espibug_timer.data = (unsigned long)sge->adapter;
+               sge->espibug_timeout = 1;
+       }
+
+       p->cmdQ_size[0] = SGE_CMDQ0_E_N;
+       p->cmdQ_size[1] = SGE_CMDQ1_E_N;
+       p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
+       p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
+       p->rx_coalesce_usecs =  50;
+       p->coalesce_enable = 0;
+       p->sample_interval_usecs = 0;
+       p->polling = 0;
+
+       return sge;
+}