}
 
 static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
+                                   const int nfrags,
                                    struct sk_buff *skb,
                                    const dma_addr_t *dmas)
 {
        int f;
-       int nfrags = skb_shinfo(skb)->nr_frags;
        struct pci_dev *pdev = mac->dma_pdev;
 
        pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);
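 
The rest of the helper's body is not shown in this hunk; a minimal sketch of the fragment-unmap tail, assuming dmas[0] maps the linear head and dmas[1..nfrags] map the page fragments, and that the return value is the (even-padded) number of ring slots the packet occupied:

        /* Sketch only: unmap each page fragment, free the skb, and report
         * how many ring slots this packet used (2 + nfrags, rounded up to
         * an even count to match how the fill side pads entries).
         */
        for (f = 0; f < nfrags; f++)
                pci_unmap_page(pdev, dmas[f + 1],
                               skb_shinfo(skb)->frags[f].size,
                               PCI_DMA_TODEVICE);

        dev_kfree_skb_irq(skb);

        return (nfrags + 3) & ~1;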
        unsigned int i, j;
        struct pasemi_mac_buffer *info;
        dma_addr_t dmas[MAX_SKB_FRAGS+1];
-       int freed;
+       int freed, nfrags;
        int start, limit;
 
        start = txring->next_to_clean;
        for (i = start; i < limit; i += freed) {
                info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
                if (info->dma && info->skb) {
-                       for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
+                       nfrags = skb_shinfo(info->skb)->nr_frags;
+                       for (j = 0; j <= nfrags; j++)
                                dmas[j] = txring->ring_info[(i+1+j) &
                                                (TX_RING_SIZE-1)].dma;
-                       freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas);
+                       freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
+                                                       info->skb, dmas);
                } else
                        freed = 2;
        }
        unsigned long flags;
        struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
        dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
+       int nf[TX_CLEAN_BATCHSIZE];
+       int nr_frags;
 
        total_count = 0;
        batch_limit = TX_CLEAN_BATCHSIZE;
        start = txring->next_to_clean;
        ring_limit = txring->next_to_fill;
 
+       prefetch(&TX_DESC_INFO(txring, start+1).skb);
+
        /* Compensate for when fill has wrapped but clean has not */
        if (start > ring_limit)
                ring_limit += TX_RING_SIZE;
                u64 mactx = TX_DESC(txring, i);
                struct sk_buff *skb;
 
+               skb = TX_DESC_INFO(txring, i+1).skb;
+               nr_frags = TX_DESC_INFO(txring, i).dma;
+
                if ((mactx  & XCT_MACTX_E) ||
                    (*chan->status & PAS_STATUS_ERROR))
                        pasemi_mac_tx_error(mac, mactx);

                if (unlikely(mactx & XCT_MACTX_O))
                        /* Not yet transmitted */
                        break;
 
-               skb = TX_DESC_INFO(txring, i+1).skb;
-               skbs[descr_count] = skb;
+               buf_count = 2 + nr_frags;
+               /* Since we always fill with an even number of entries, make
+                * sure we skip any unused one at the end as well.
+                */
+               if (buf_count & 1)
+                       buf_count++;
 
-               buf_count = 2 + skb_shinfo(skb)->nr_frags;
-               for (j = 0; j <= skb_shinfo(skb)->nr_frags; j++)
+               for (j = 0; j <= nr_frags; j++)
                        dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;
 
+               skbs[descr_count] = skb;
+               nf[descr_count] = nr_frags;
+
                TX_DESC(txring, i) = 0;
                TX_DESC(txring, i+1) = 0;
 
-               /* Since we always fill with an even number of entries, make
-                * sure we skip any unused one at the end as well.
-                */
-               if (buf_count & 1)
-                       buf_count++;
                descr_count++;
        }
        txring->next_to_clean = i & (TX_RING_SIZE-1);
        netif_wake_queue(mac->netdev);
 
        for (i = 0; i < descr_count; i++)
-               pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);
+               pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);
 
        total_count += descr_count;
 
        }
 
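The hunk below is the transmit-fill side of the same change: the otherwise-unused dma field of the ring_info slot holding the MACTX control word is used to stash nfrags, which is what the cleanup loop above reads back as nr_frags. Read together, the hunks imply roughly the following per-packet ring layout (a reconstruction from this patch, not verbatim driver documentation):

        /*
         * Per-packet TX ring layout (sketch, reconstructed from the hunks):
         *
         *   slot i       TX_DESC          = mactx control word
         *                TX_DESC_INFO.dma = nfrags       (new in this patch)
         *   slot i+1     TX_DESC_INFO.skb = skb
         *                TX_DESC_INFO.dma = DMA address of the linear head
         *   slot i+1+j   TX_DESC_INFO.dma = DMA address of fragment j-1
         *                                   (j = 1 .. nfrags)
         *
         * buf_count = 2 + nfrags, padded to an even number of slots, which
         * is why the cleanup loop rounds odd counts up before advancing.
         */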
        TX_DESC(txring, fill) = mactx;
+       TX_DESC_INFO(txring, fill).dma = nfrags;
        fill++;
        TX_DESC_INFO(txring, fill).skb = skb;
        for (i = 0; i <= nfrags; i++) {