Many drivers use skb->tail unnecessarily.
In these situations, the code roughly looks like:
	skb = dev_alloc_skb(...);
	[optional] skb_reserve(skb, ...);
	... skb->tail ...
But even when the skb_reserve() happens, skb->data still equals
skb->tail, so there is no reason to use anything other than
skb->data in these cases.
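For reference, skb_reserve() simply advances both pointers by the
same amount (paraphrased from include/linux/skbuff.h; the exact
signature varies between kernel versions), which is why the equality
still holds afterwards:
	static inline void skb_reserve(struct sk_buff *skb, int len)
	{
		skb->data += len;
		skb->tail += len;
	}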
Another case was the s2io.c driver directly mucking with
the skb->data and skb->tail pointers.  It really just wanted
to do an skb_reserve(), so that's what the code was changed
to do instead.
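As a rough sketch of that equivalence (with ALIGN_SIZE being the
driver's alignment mask), rounding the pointers up by hand:
	tmp = (unsigned long) skb->data;
	tmp += ALIGN_SIZE;
	tmp &= ~ALIGN_SIZE;
	skb->data = (void *) tmp;
	skb->tail = (void *) tmp;
does the same thing as reserving the misalignment as headroom:
	tmp = (unsigned long) skb->data & ALIGN_SIZE;
	if (tmp)
		skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);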
Another reason I'm making this change is that it makes some SKB
cleanups I have planned simpler to merge.  In those cleanups,
skb->head, skb->tail, and skb->end pointers are removed, and
replaced with skb->head_room and skb->tail_room integers.
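Purely as an illustrative sketch (not part of this patch, and the
final layout may well differ), the idea is roughly:
	struct sk_buff {
		...
		unsigned char	*data;
		unsigned int	head_room;	/* bytes available before data */
		unsigned int	tail_room;	/* bytes available past data + len */
		...
	};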
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Jeff Garzik <jgarzik@pobox.com>
                                break;  /* Bad news!  */
                        skb->dev = dev; /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                       vp->rx_ring[i].addr = isa_virt_to_bus(skb->tail);
+                       vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
                }
                vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]);     /* Wrap the ring. */
                outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
                                break;  /* Bad news!  */
                        skb->dev = dev; /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                       vp->rx_ring[entry].addr = isa_virt_to_bus(skb->tail);
+                       vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
                        vp->rx_skbuff[entry] = skb;
                }
                vp->rx_ring[entry].status = 0;  /* Clear complete bit. */
 
                                break;                  /* Bad news!  */
                        skb->dev = dev;                 /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                       vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+                       vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
                }
                if (i != RX_RING_SIZE) {
                        int j;
                                pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                                /* 'skb_put()' points to the start of sk_buff data area. */
                                memcpy(skb_put(skb, pkt_len),
-                                          vp->rx_skbuff[entry]->tail,
+                                          vp->rx_skbuff[entry]->data,
                                           pkt_len);
                                pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                                vp->rx_copy++;
                        }
                        skb->dev = dev;                 /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                       vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+                       vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
                        vp->rx_skbuff[entry] = skb;
                }
                vp->rx_ring[entry].status = 0;  /* Clear complete bit. */
 
 
                mapping =
                cp->rx_skb[rx_tail].mapping =
-                       pci_map_single(cp->pdev, new_skb->tail,
+                       pci_map_single(cp->pdev, new_skb->data,
                                       buflen, PCI_DMA_FROMDEVICE);
                cp->rx_skb[rx_tail].skb = new_skb;
 
                skb_reserve(skb, RX_OFFSET);
 
                cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
-                       skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                cp->rx_skb[i].skb = skb;
 
                cp->rx_ring[i].opts2 = 0;
 
                rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
                rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
                rbd->skb = skb;
-               rbd->v_data = skb->tail;
-               rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
+               rbd->v_data = skb->data;
+               rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
                rbd->size = PKT_BUF_SZ;
 #ifdef __mc68000__
-               cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
+               cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
 #endif
        }
        lp->rbd_head = lp->rbds;
                                rx_in_place = 1;
                                rbd->skb = newskb;
                                newskb->dev = dev;
-                               rbd->v_data = newskb->tail;
-                               rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
+                               rbd->v_data = newskb->data;
+                               rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
 #ifdef __mc68000__
-                               cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
+                               cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
 #endif
                        }
                        else
                                skb->protocol=eth_type_trans(skb,dev);
                                skb->len = pkt_len;
 #ifdef __mc68000__
-                               cache_clear(virt_to_phys(rbd->skb->tail),
+                               cache_clear(virt_to_phys(rbd->skb->data),
                                                pkt_len);
 #endif
                                netif_rx(skb);
 
                                skb_reserve (skb, 2);
                                np->rx_ring[entry].fraginfo =
                                    cpu_to_le64 (pci_map_single
-                                        (np->pdev, skb->tail, np->rx_buf_sz,
+                                        (np->pdev, skb->data, np->rx_buf_sz,
                                          PCI_DMA_FROMDEVICE));
                        }
                        np->rx_ring[entry].fraginfo |=
                /* Rubicon now supports 40 bits of addressing space. */
                np->rx_ring[i].fraginfo =
                    cpu_to_le64 ( pci_map_single (
-                                 np->pdev, skb->tail, np->rx_buf_sz,
+                                 np->pdev, skb->data, np->rx_buf_sz,
                                  PCI_DMA_FROMDEVICE));
                np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
        }
                                /* 16 byte align the IP header */
                                skb_reserve (skb, 2);
                                eth_copy_and_sum (skb,
-                                                 np->rx_skbuff[entry]->tail,
+                                                 np->rx_skbuff[entry]->data,
                                                  pkt_len, 0);
                                skb_put (skb, pkt_len);
                                pci_dma_sync_single_for_device(np->pdev,
                        skb_reserve (skb, 2);
                        np->rx_ring[entry].fraginfo =
                            cpu_to_le64 (pci_map_single
-                                        (np->pdev, skb->tail, np->rx_buf_sz,
+                                        (np->pdev, skb->data, np->rx_buf_sz,
                                          PCI_DMA_FROMDEVICE));
                }
                np->rx_ring[entry].fraginfo |=
 
                if (skb == NULL)
                        break;                  /* OK.  Just initially short of Rx bufs. */
                skb->dev = dev;                 /* Mark as being used by this device. */
-               rxf = (struct RxFD *)skb->tail;
+               rxf = (struct RxFD *)skb->data;
                sp->rx_ringp[i] = rxf;
                sp->rx_ring_dma[i] =
                        pci_map_single(sp->pdev, rxf,
                sp->rx_ringp[entry] = NULL;
                return NULL;
        }
-       rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+       rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
        sp->rx_ring_dma[entry] =
                pci_map_single(sp->pdev, rxf,
                                           PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
 
 #if 1 || USE_IP_CSUM
                                /* Packet is in one chunk -- we can copy + cksum. */
-                               eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+                               eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
 #else
-                               memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
+                               memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
                                           pkt_len);
 #endif
                                pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
 
                skb->dev = dev;                 /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, 
-                       skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
                ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
        }
        ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
                                                            ep->rx_ring[entry].bufaddr,
                                                            ep->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);
-                               eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
+                               eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(ep->pci_dev,
                                                               ep->rx_ring[entry].bufaddr,
                        skb->dev = dev;                 /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, 
-                               skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                               skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        work_done++;
                }
                ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
 
 
                skb->dev = dev; /* Mark as being used by this device. */
                np->lack_rxbuf->skbuff = skb;
-               np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
+               np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
                        np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                np->lack_rxbuf->status = RXOWN;
                ++np->really_rx_count;
                ++np->really_rx_count;
                np->rx_ring[i].skbuff = skb;
                skb->dev = dev; /* Mark as being used by this device. */
-               np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
+               np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
                        np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                np->rx_ring[i].status = RXOWN;
                np->rx_ring[i].control |= RXIC;
 
 #if ! defined(__alpha__)
                                eth_copy_and_sum(skb, 
-                                       np->cur_rx->skbuff->tail, pkt_len, 0);
+                                       np->cur_rx->skbuff->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
 #else
                                memcpy(skb_put(skb, pkt_len),
-                                       np->cur_rx->skbuff->tail, pkt_len);
+                                       np->cur_rx->skbuff->data, pkt_len);
 #endif
                                pci_dma_sync_single_for_device(np->pci_dev,
                                                               np->cur_rx->buffer,
 
                skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2); /* 16 byte align the IP header. */
                 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, 
-                       skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                       skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
                hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | 
                        DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
        }
                skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2); /* 16 byte align the IP header. */
                 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, 
-                       skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                       skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
                /* -2 because it doesn't REALLY have that first 2 bytes -KDU */
                hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | 
                        DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
                                            desc->addr,
                                            hmp->rx_buf_sz,
                                            PCI_DMA_FROMDEVICE);
-               buf_addr = (u8 *) hmp->rx_skbuff[entry]->tail;
+               buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
                frame_status = le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
                if (hamachi_debug > 4)
                        printk(KERN_DEBUG "  hamachi_rx() status was %8.8x.\n",
                        skb->dev = dev;         /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, 
-                               skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                               skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
                }
                desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
                if (entry >= RX_RING_SIZE-1)
                                   readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
                                   i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
                        if (hamachi_debug > 6) {
-                               if (*(u8*)hmp->rx_skbuff[i]->tail != 0x69) {
+                               if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
                                        u16 *addr = (u16 *)
-                                               hmp->rx_skbuff[i]->tail;
+                                               hmp->rx_skbuff[i]->data;
                                        int j;
 
                                        for (j = 0; j < 0x50; j++)
 
                lp->rx_skbuff[i] = skb;
                if (skb) {
                        skb->dev = dev;
-                       rx_buff = skb->tail;
+                       rx_buff = skb->data;
                } else
                        rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
                if (rx_buff == NULL)
 
                if (skb == NULL)
                        panic("%s: alloc_skb() failed", __FILE__);
                skb_reserve(skb, 2);
-               dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
+               dma_addr = dma_map_single(lp->dev, skb->data,PKT_BUF_SZ,
                                          DMA_FROM_DEVICE);
                skb->dev = dev;
                rbd->v_next = rbd+1;
                rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
                rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
                rbd->skb = skb;
-               rbd->v_data = skb->tail;
+               rbd->v_data = skb->data;
                rbd->b_data = WSWAPchar(dma_addr);
                rbd->size = PKT_BUF_SZ;
        }
                                rx_in_place = 1;
                                rbd->skb = newskb;
                                newskb->dev = dev;
-                               dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
-                               rbd->v_data = newskb->tail;
+                               dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
+                               rbd->v_data = newskb->data;
                                rbd->b_data = WSWAPchar(dma_addr);
                                CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
                        }
 
                                break; /* Better luck next round. */
                        skb->dev = dev; /* Mark as being used by this device. */
                        np->rx_dma[entry] = pci_map_single(np->pci_dev,
-                               skb->tail, buflen, PCI_DMA_FROMDEVICE);
+                               skb->data, buflen, PCI_DMA_FROMDEVICE);
                        np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
                }
                np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
                                        buflen,
                                        PCI_DMA_FROMDEVICE);
                                eth_copy_and_sum(skb,
-                                       np->rx_skbuff[entry]->tail, pkt_len, 0);
+                                       np->rx_skbuff[entry]->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(np->pci_dev,
                                        np->rx_dma[entry],
 
 
        dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
        cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
-       buf = pci_map_single(dev->pci_dev, skb->tail,
+       buf = pci_map_single(dev->pci_dev, skb->data,
                             REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
        build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
        /* update link of previous rx */
                if (unlikely(!skb))
                        break;
 
-               res = (long)skb->tail & 0xf;
+               res = (long)skb->data & 0xf;
                res = 0x10 - res;
                res &= 0xf;
                skb_reserve(skb, res);
 
 
        rmb();
        if (lp->rx_dma_addr[i] == 0)
-           lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail,
+           lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data,
                    PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
        lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
        lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
                        lp->rx_skbuff[entry] = newskb;
                        newskb->dev = dev;
                        lp->rx_dma_addr[entry] =
-                           pci_map_single(lp->pci_dev, newskb->tail,
+                           pci_map_single(lp->pci_dev, newskb->data,
                                    PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
                        lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
                        rx_in_place = 1;
                                                PKT_BUF_SZ-2,
                                                PCI_DMA_FROMDEVICE);
                    eth_copy_and_sum(skb,
-                           (unsigned char *)(lp->rx_skbuff[entry]->tail),
+                           (unsigned char *)(lp->rx_skbuff[entry]->data),
                            pkt_len,0);
                    pci_dma_sync_single_for_device(lp->pci_dev,
                                                   lp->rx_dma_addr[entry],
 
        skb_reserve(skb, NET_IP_ALIGN);
        *sk_buff = skb;
 
-       mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
+       mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
 
        rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
                skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
                if (skb) {
                        skb_reserve(skb, NET_IP_ALIGN);
-                       eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
+                       eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
                        *sk_buff = skb;
                        rtl8169_mark_to_asic(desc, rx_buf_sz);
                        ret = 0;
 
 #else
                ba = &nic->ba[ring_no][block_no][off];
                skb_reserve(skb, BUF0_LEN);
-               tmp = (unsigned long) skb->data;
-               tmp += ALIGN_SIZE;
-               tmp &= ~ALIGN_SIZE;
-               skb->data = (void *) tmp;
-               skb->tail = (void *) tmp;
+               tmp = ((unsigned long) skb->data & ALIGN_SIZE);
+               if (tmp)
+                       skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
 
                memset(rxdp, 0, sizeof(RxD_t));
                rxdp->Buffer2_ptr = pci_map_single
 
        /*
         * Do not interrupt per DMA transfer.
         */
-       dsc->dscr_a = virt_to_phys(sb_new->tail) |
+       dsc->dscr_a = virt_to_phys(sb_new->data) |
                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
                0;
 #else
-       dsc->dscr_a = virt_to_phys(sb_new->tail) |
+       dsc->dscr_a = virt_to_phys(sb_new->data) |
                V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
                M_DMA_DSCRA_INTERRUPT;
 #endif
 
                sis_priv->rx_skbuff[i] = skb;
                sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
                 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
-                        skb->tail, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                        skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
        }
        sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
 
                        sis_priv->rx_skbuff[entry] = skb;
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                        sis_priv->rx_ring[entry].bufptr = 
-                               pci_map_single(sis_priv->pci_dev, skb->tail, 
+                               pci_map_single(sis_priv->pci_dev, skb->data, 
                                        RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
                        sis_priv->dirty_rx++;
                }
                        sis_priv->rx_skbuff[entry] = skb;
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                        sis_priv->rx_ring[entry].bufptr =
-                               pci_map_single(sis_priv->pci_dev, skb->tail,
+                               pci_map_single(sis_priv->pci_dev, skb->data,
                                        RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
                }
        }
 
                np->rx_info[i].skb = skb;
                if (skb == NULL)
                        break;
-               np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+               np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                skb->dev = dev;                 /* Mark as being used by this device. */
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
                        pci_dma_sync_single_for_cpu(np->pci_dev,
                                                    np->rx_info[entry].mapping,
                                                    pkt_len, PCI_DMA_FROMDEVICE);
-                       eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
+                       eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
                        pci_dma_sync_single_for_device(np->pci_dev,
                                                       np->rx_info[entry].mapping,
                                                       pkt_len, PCI_DMA_FROMDEVICE);
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        np->rx_info[entry].mapping =
-                               pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                               pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        skb->dev = dev; /* Mark as being used by this device. */
                        np->rx_ring[entry].rxaddr =
                                cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 
                skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                np->rx_ring[i].frag[0].addr = cpu_to_le32(
-                       pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
+                       pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
                                PCI_DMA_FROMDEVICE));
                np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
        }
                                                            np->rx_buf_sz,
                                                            PCI_DMA_FROMDEVICE);
 
-                               eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+                               eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
                                pci_dma_sync_single_for_device(np->pci_dev,
                                                               desc->frag[0].addr,
                                                               np->rx_buf_sz,
                        skb->dev = dev;         /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        np->rx_ring[entry].frag[0].addr = cpu_to_le32(
-                               pci_map_single(np->pci_dev, skb->tail,
+                               pci_map_single(np->pci_dev, skb->data,
                                        np->rx_buf_sz, PCI_DMA_FROMDEVICE));
                }
                /* Perhaps we need not reset this field. */
 
 
                        mapping =
                        de->rx_skb[rx_tail].mapping =
-                               pci_map_single(de->pdev, copy_skb->tail,
+                               pci_map_single(de->pdev, copy_skb->data,
                                               buflen, PCI_DMA_FROMDEVICE);
                        de->rx_skb[rx_tail].skb = copy_skb;
                } else {
                        pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
                        skb_reserve(copy_skb, RX_OFFSET);
-                       memcpy(skb_put(copy_skb, len), skb->tail, len);
+                       memcpy(skb_put(copy_skb, len), skb->data, len);
 
                        pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 
                skb->dev = de->dev;
 
                de->rx_skb[i].mapping = pci_map_single(de->pdev,
-                       skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
                de->rx_skb[i].skb = skb;
 
                de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
 
 
                                /* Received Packet CRC check need or not */
                                if ( (db->dm910x_chk_mode & 1) &&
-                                       (cal_CRC(skb->tail, rxlen, 1) !=
-                                       (*(u32 *) (skb->tail+rxlen) ))) { /* FIXME (?) */
+                                       (cal_CRC(skb->data, rxlen, 1) !=
+                                       (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
                                        /* Found a error received packet */
                                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                                        db->dm910x_chk_mode = 3;
                                                /* size less than COPY_SIZE, allocate a rxlen SKB */
                                                skb->dev = dev;
                                                skb_reserve(skb, 2); /* 16byte align */
-                                               memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+                                               memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
                                                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                                        } else {
                                                skb->dev = dev;
 
        if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
                rxptr->rx_skb_ptr = skb;
-               rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+               rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
                wmb();
                rxptr->rdes0 = cpu_to_le32(0x80000000);
                db->rx_avail_cnt++;
                if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
                        break;
                rxptr->rx_skb_ptr = skb; /* FIXME (?) */
-               rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+               rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
                wmb();
                rxptr->rdes0 = cpu_to_le32(0x80000000);
                rxptr = rxptr->next_rx_desc;
 
                        if (skb == NULL)
                                break;
 
-                       mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+                       mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;
 
                                                                   tp->rx_buffers[entry].mapping,
                                                                   pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-                                       eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+                                       eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
                                                         pkt_len, 0);
                                        skb_put(skb, pkt_len);
 #else
                                        memcpy(skb_put(skb, pkt_len),
-                                              tp->rx_buffers[entry].skb->tail,
+                                              tp->rx_buffers[entry].skb->data,
                                               pkt_len);
 #endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-                               eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+                               eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
                                                 pkt_len, 0);
                                skb_put(skb, pkt_len);
 #else
                                memcpy(skb_put(skb, pkt_len),
-                                      tp->rx_buffers[entry].skb->tail,
+                                      tp->rx_buffers[entry].skb->data,
                                       pkt_len);
 #endif
                                pci_dma_sync_single_for_device(tp->pdev,
 
                tp->rx_buffers[i].skb = skb;
                if (skb == NULL)
                        break;
-               mapping = pci_map_single(tp->pdev, skb->tail,
+               mapping = pci_map_single(tp->pdev, skb->data,
                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                tp->rx_buffers[i].mapping = mapping;
                skb->dev = dev;                 /* Mark as being used by this device. */
 
                if (skb == NULL)
                        break;
                skb->dev = dev;                 /* Mark as being used by this device. */
-               np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
+               np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
                                        skb->len,PCI_DMA_FROMDEVICE);
 
                np->rx_ring[i].buffer1 = np->rx_addr[i];
                                pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
                                                            np->rx_skbuff[entry]->len,
                                                            PCI_DMA_FROMDEVICE);
-                               eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+                               eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
                                                               np->rx_skbuff[entry]->len,
                                break;                  /* Better luck next round. */
                        skb->dev = dev;                 /* Mark as being used by this device. */
                        np->rx_addr[entry] = pci_map_single(np->pci_dev,
-                                                       skb->tail,
+                                                       skb->data,
                                                        skb->len, PCI_DMA_FROMDEVICE);
                        np->rx_ring[entry].buffer1 = np->rx_addr[entry];
                }
 
                        break;
                skb->dev = dev;                 /* Mark as being used by this device. */
                tp->rx_ring[i].status = Rx0DescOwned;   /* Owned by Xircom chip */
-               tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
+               tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
        }
        tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
                        if (skb == NULL)
                                break;
                        skb->dev = dev;                 /* Mark as being used by this device. */
-                       tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
+                       tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
                        work_done++;
                }
                tp->rx_ring[entry].status = Rx0DescOwned;
 
 #endif
 
        skb->dev = tp->dev;
-       dma_addr = pci_map_single(tp->pdev, skb->tail,
+       dma_addr = pci_map_single(tp->pdev, skb->data,
                                  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 
        /* Since no card does 64 bit DAC, the high bits will never
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
                                                    PKT_BUF_SZ,
                                                    PCI_DMA_FROMDEVICE);
-                       eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
+                       eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr,
                                                       PKT_BUF_SZ,
                                                       PCI_DMA_FROMDEVICE);
 
                skb->dev = dev;                 /* Mark as being used by this device. */
 
                rp->rx_skbuff_dma[i] =
-                       pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
+                       pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
                                       PCI_DMA_FROMDEVICE);
 
                rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
                                                            PCI_DMA_FROMDEVICE);
 
                                eth_copy_and_sum(skb,
-                                                rp->rx_skbuff[entry]->tail,
+                                                rp->rx_skbuff[entry]->data,
                                                 pkt_len, 0);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(rp->pdev,
                                break;  /* Better luck next round. */
                        skb->dev = dev; /* Mark as being used by this device. */
                        rp->rx_skbuff_dma[entry] =
-                               pci_map_single(rp->pdev, skb->tail,
+                               pci_map_single(rp->pdev, skb->data,
                                               rp->rx_buf_sz,
                                               PCI_DMA_FROMDEVICE);
                        rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
 
                        if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
                                skb_reserve(new_skb, 2);
 
-                       memcpy(new_skb->data, rx_skb[0]->tail, pkt_size);
+                       memcpy(new_skb->data, rx_skb[0]->data, pkt_size);
                        *rx_skb = new_skb;
                        ret = 0;
                }
         *      Do the gymnastics to get the buffer head for data at
         *      64byte alignment.
         */
-       skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63);
+       skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
        rd_info->skb->dev = vptr->dev;
-       rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+       rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
        
        /*
         *      Fill in the descriptor to match
 
        }
        skb_reserve(skb, 4);
        cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
-       data = (cisco_packet*)skb->tail;
+       data = (cisco_packet*)skb->data;
 
        data->type = htonl(type);
        data->par1 = htonl(par1);
 
                skb->dev = dev;         /* Mark as being used by this device. */
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-                       skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                       skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
        }
        yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
        yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
                pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
                        yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                desc_status = le32_to_cpu(desc->result_status) >> 16;
-               buf_addr = rx_skb->tail;
+               buf_addr = rx_skb->data;
                data_size = (le32_to_cpu(desc->dbdma_cmd) - 
                        le32_to_cpu(desc->result_status)) & 0xffff;
                frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
                                        break;
                                skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
-                               eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
+                               eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
                                skb_put(skb, pkt_len);
                                pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
                                                                                           yp->rx_buf_sz,
                        skb->dev = dev; /* Mark as being used by this device. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                        yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-                               skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                               skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
                }
                yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->rx_ring[entry].result_status = 0;   /* Clear complete bit. */