        dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
                "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
-               (void *)skb->data, skb_tail_pointer(skb), (void *)skb->end,
+               (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
                skb->len);
 
        skb->protocol = eth_type_trans(skb, xpnet_device);
        dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
                "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
                (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb),
-               (void *) skb->end, skb->len);
+               skb_end_pointer(skb), skb->len);
 
 
        xpnet_device->last_rx = jiffies;
 
        dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
                "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
-               (void *)skb->data, skb_tail_pointer(skb), (void *)skb->end,
+               (void *)skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
                skb->len);
 
 
 
     }
     // cast needed as there is no %? for pointer differences
     PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
-           skb, skb->head, (long) (skb->end - skb->head));
+           skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
     rx.handle = virt_to_bus (skb);
     rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
     if (rx_give (dev, &rx, pool))
 
        vcc = vc->rx_vcc;
 
        pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                   skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                                   skb_end_pointer(skb) - skb->data,
+                                   PCI_DMA_FROMDEVICE);
 
        if ((vcc->qos.aal == ATM_AAL0) ||
            (vcc->qos.aal == ATM_AAL34)) {
                }
 
                pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                                skb_end_pointer(skb) - skb->data,
+                                PCI_DMA_FROMDEVICE);
                sb_pool_remove(card, skb);
 
                skb_trim(skb, len);
        tail = readl(SAR_REG_RAWCT);
 
        pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
-                                   queue->end - queue->head - 16,
+                                   skb_end_pointer(queue) - queue->head - 16,
                                    PCI_DMA_FROMDEVICE);
 
        while (head != tail) {
                                queue = card->raw_cell_head;
                                pci_dma_sync_single_for_cpu(card->pcidev,
                                                            IDT77252_PRV_PADDR(queue),
-                                                           queue->end - queue->data,
+                                                           (skb_end_pointer(queue) -
+                                                            queue->data),
                                                            PCI_DMA_FROMDEVICE);
                        } else {
                                card->raw_cell_head = NULL;
                }
 
                paddr = pci_map_single(card->pcidev, skb->data,
-                                      skb->end - skb->data,
+                                      skb_end_pointer(skb) - skb->data,
                                       PCI_DMA_FROMDEVICE);
                IDT77252_PRV_PADDR(skb) = paddr;
 
 
 outunmap:
        pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                        skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                        skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
 
        handle = IDT77252_PRV_POOL(skb);
        card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
        int err;
 
        pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                      skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                                      skb_end_pointer(skb) - skb->data,
+                                      PCI_DMA_FROMDEVICE);
 
        err = push_rx_skb(card, skb, POOL_QUEUE(handle));
        if (err) {
                pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
-                                skb->end - skb->data, PCI_DMA_FROMDEVICE);
+                                skb_end_pointer(skb) - skb->data,
+                                PCI_DMA_FROMDEVICE);
                sb_pool_remove(card, skb);
                dev_kfree_skb(skb);
        }
                        if (skb) {
                                pci_unmap_single(card->pcidev,
                                                 IDT77252_PRV_PADDR(skb),
-                                                skb->end - skb->data,
+                                                (skb_end_pointer(skb) -
+                                                 skb->data),
                                                 PCI_DMA_FROMDEVICE);
                                card->sbpool[i].skb[j] = NULL;
                                dev_kfree_skb(skb);
 
        BUG_ON(skb_cloned(skb));
 
        mpalen = sizeof(*mpa) + ep->plen;
-       if (skb->data + mpalen + sizeof(*req) > skb->end) {
+       if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
                kfree_skb(skb);
                skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
                if (!skb) {
 
 
 #ifdef ETHDEBUG
                printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
-                 skb->head, skb->data, skb_tail_pointer(skb), skb->end);
+                      skb->head, skb->data, skb_tail_pointer(skb),
+                      skb_end_pointer(skb));
                printk("copying packet to 0x%x.\n", skb_data_ptr);
 #endif
 
 
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
                if (skb) {
                        np->put_rx_ctx->skb = skb;
-                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
-                                                            skb->end-skb->data, PCI_DMA_FROMDEVICE);
-                       np->put_rx_ctx->dma_len = skb->end-skb->data;
+                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+                                                            skb->data,
+                                                       (skb_end_pointer(skb) -
+                                                        skb->data),
+                                                            PCI_DMA_FROMDEVICE);
+                       np->put_rx_ctx->dma_len = (skb_end_pointer(skb) -
+                                                  skb->data);
                        np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
                        wmb();
                        np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
                struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
                if (skb) {
                        np->put_rx_ctx->skb = skb;
-                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
-                                                            skb->end-skb->data, PCI_DMA_FROMDEVICE);
-                       np->put_rx_ctx->dma_len = skb->end-skb->data;
+                       np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+                                                            skb->data,
+                                                            (skb_end_pointer(skb) -
+                                                             skb->data),
+                                                            PCI_DMA_FROMDEVICE);
+                       np->put_rx_ctx->dma_len = (skb_end_pointer(skb) -
+                                                  skb->data);
                        np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
                        np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
                        wmb();
                wmb();
                if (np->rx_skb[i].skb) {
                        pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
-                                               np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
-                                               PCI_DMA_FROMDEVICE);
+                                        (skb_end_pointer(np->rx_skb[i].skb) -
+                                         np->rx_skb[i].skb->data),
+                                        PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(np->rx_skb[i].skb);
                        np->rx_skb[i].skb = NULL;
                }
        for (i = 0; i < pkt_len; i++)
                pkt_data[i] = (u8)(i & 0xff);
        test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
-                                      tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
+                                      (skb_end_pointer(tx_skb) -
+                                       tx_skb->data), PCI_DMA_FROMDEVICE);
 
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
        }
 
        pci_unmap_page(np->pci_dev, test_dma_addr,
-                      tx_skb->end-tx_skb->data,
+                      (skb_end_pointer(tx_skb) - tx_skb->data),
                       PCI_DMA_TODEVICE);
        dev_kfree_skb_any(tx_skb);
  out:
 
        dev_dbg(&bp->pdev->dev,
                "start_xmit: len %u head %p data %p tail %p end %p\n",
                skb->len, skb->head, skb->data,
-               skb_tail_pointer(skb), skb->end);
+               skb_tail_pointer(skb), skb_end_pointer(skb));
        dev_dbg(&bp->pdev->dev,
                "data:");
        for (i = 0; i < 16; i++)
 
         sc->lmc_rxring[i].status = 0x80000000;
 
         /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
-        sc->lmc_rxring[i].length = skb->end - skb->data;
+        sc->lmc_rxring[i].length = skb_end_pointer(skb) - skb->data;
 
         /* use to be tail which is dumb since you're thinking why write
          * to the end of the packet but since there's nothing there tail == data
 
                if (frag != 0)
                        flen -= hdrlen;
 
-               if (skb_tail_pointer(frag_skb) + flen > frag_skb->end) {
+               if (frag_skb->tail + flen > frag_skb->end) {
                        printk(KERN_WARNING "%s: host decrypted and "
                               "reassembled frame did not fit skb\n",
                               dev->name);
 
 
        sarb = instance->cached_vcc->sarb;
 
-       if (skb_tail_pointer(sarb) + ATM_CELL_PAYLOAD > sarb->end) {
+       if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) {
                atm_rldbg(instance, "%s: buffer overrun (sarb->len %u, vcc: 0x%p)!\n",
                                __func__, sarb->len, vcc);
                /* discard cells already received */
                skb_trim(sarb, 0);
-               UDSL_ASSERT(skb_tail_pointer(sarb) + ATM_CELL_PAYLOAD <= sarb->end);
+               UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end);
        }
 
        memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
 
        sk_buff_data_t          mac_header;
        /* These elements must be at the end, see alloc_skb() for details.  */
        sk_buff_data_t          tail;
+       sk_buff_data_t          end;
        unsigned char           *head,
-                               *data,
-                               *end;
+                               *data;
        unsigned int            truesize;
        atomic_t                users;
 };
                                    unsigned int to, struct ts_config *config,
                                    struct ts_state *state);
 
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
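+/*
+ * skb->end is kept as an offset from skb->head in this configuration;
+ * rebuild the real end-of-buffer pointer for callers that need one.
+ */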
+static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+{
+       return skb->head + skb->end;
+}
+#else
+static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+{
+       return skb->end;
+}
+#endif
+
 /* Internal */
-#define skb_shinfo(SKB)                ((struct skb_shared_info *)((SKB)->end))
+#define skb_shinfo(SKB)        ((struct skb_shared_info *)(skb_end_pointer(SKB)))
 
 /**
  *     skb_queue_empty - check if a queue is empty
 {
        skb->tail = skb->data + offset;
 }
+
 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
 
 /*
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len  += len;
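+       /* tail and end are both sk_buff_data_t now, so compare them directly */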
-       if (unlikely(skb_tail_pointer(skb) > skb->end))
+       if (unlikely(skb->tail > skb->end))
                skb_over_panic(skb, len, current_text_addr());
        return tmp;
 }
  */
 static inline int skb_tailroom(const struct sk_buff *skb)
 {
-       return skb_is_nonlinear(skb) ? 0 : skb->end - skb_tail_pointer(skb);
+       return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
 }
 
 /**
 
 
        DPRINTK("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
                (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
-               (long)skb->end);
+               (long)skb_end_pointer(skb));
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
        if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
                lec_handle_bridge(skb, dev);
 
 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
-                         "data:%p tail:%#lx end:%p dev:%s\n",
+                         "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
-              (unsigned long)skb->tail, skb->end,
+              (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
-                         "data:%p tail:%#lx end:%p dev:%s\n",
+                         "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
-              (unsigned long)skb->tail, skb->end,
+              (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
 }
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
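+       /* tail was just reset, so this is data + size as a pointer or the
+        * equivalent offset from skb->head
+        */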
-       skb->end  = data + size;
+       skb->end = skb->tail + size;
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
-                                     gfp_mask);
+       struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n = alloc_skb(skb->end + skb->data_len, gfp_mask);
+#else
+       n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
+#endif
        if (!n)
                return NULL;
 
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
-
+       struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       n = alloc_skb(skb->end, gfp_mask);
+#else
+       n = alloc_skb(skb->end - skb->head, gfp_mask);
+#endif
        if (!n)
                goto out;
 
 {
        int i;
        u8 *data;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
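+       /* with offsets, skb->end is already the size of the linear data area */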
+       int size = nhead + skb->end + ntail;
+#else
        int size = nhead + (skb->end - skb->head) + ntail;
+#endif
        long off;
 
        if (skb_shared(skb))
        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void. */
        memcpy(data + nhead, skb->head,
-               skb->tail
-#ifndef NET_SKBUFF_DATA_USES_OFFSET
-               - skb->head
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+               skb->tail);
+#else
+               skb->tail - skb->head);
 #endif
-               );
-       memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
+       memcpy(data + size, skb_end_pointer(skb),
+              sizeof(struct skb_shared_info));
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);
        off = (data + nhead) - skb->head;
 
        skb->head     = data;
-       skb->end      = data + size;
        skb->data    += off;
-#ifndef NET_SKBUFF_DATA_USES_OFFSET
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->end      = size;
+#else
+       skb->end      = skb->head + size;
        /* {transport,network,mac}_header and tail are relative to skb->head */
        skb->tail             += off;
        skb->transport_header += off;
                return 0;
        }
 
-       ntail = skb->data_len + pad - (skb->end - skb_tail_pointer(skb));
+       ntail = skb->data_len + pad - (skb->end - skb->tail);
        if (likely(skb_cloned(skb) || ntail > 0)) {
                err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
                if (unlikely(err))
         * plus 128 bytes for future expansions. If we have enough
         * room at tail, reallocate without expansion only if skb is cloned.
         */
-       int i, k, eat = (skb_tail_pointer(skb) + delta) - skb->end;
+       int i, k, eat = (skb->tail + delta) - skb->end;
 
        if (eat > 0 || skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
 
                if (frag != 0)
                        flen -= hdrlen;
 
-               if (skb_tail_pointer(frag_skb) + flen > frag_skb->end) {
+               if (frag_skb->tail + flen > frag_skb->end) {
                        printk(KERN_WARNING "%s: host decrypted and "
                               "reassembled frame did not fit skb\n",
                               dev->name);
 
 
        skb_orphan(skb);
 
-       delta = skb->end - skb_tail_pointer(skb);
+       delta = skb->end - skb->tail;
        if (delta * 2 < skb->truesize)
                return skb;