iph->tot_len = 0;
                        iph->check = 0;
-                       skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
-                                                             iph->daddr, 0,
-                                                             IPPROTO_TCP, 0);
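+                       /*
+                        * Seed ->check with the pseudo-header checksum
+                        * (length 0): the hardware adds the rest per
+                        * segment when doing TSO.
+                        */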
+                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                                iph->daddr, 0,
+                                                                IPPROTO_TCP,
+                                                                0);
                        ipofst = skb_network_offset(skb);
                        if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
                                tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
 
                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
 
                tcp_opt_len = 0;
-               if (skb->h.th->doff > 5)
+               if (tcp_hdr(skb)->doff > 5)
                        tcp_opt_len = tcp_optlen(skb);
 
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-               skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                     0, IPPROTO_TCP, 0);
-
+               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                        iph->daddr, 0,
+                                                        IPPROTO_TCP, 0);
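+               /* Record the extra IP and TCP header length, in 32-bit
+                * words, in the TX BD flags.
+                */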
                if (tcp_opt_len || (iph->ihl > 5)) {
                        vlan_tag_flags |= ((iph->ihl - 5) +
                                           (tcp_opt_len >> 2)) << 8;
 
                hdr->opcode = CPL_TX_PKT_LSO;
                hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
                hdr->ip_hdr_words = ip_hdr(skb)->ihl;
-               hdr->tcp_hdr_words = skb->h.th->doff;
+               hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
                hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
                                                          skb_shinfo(skb)->gso_size));
                hdr->len = htonl(skb->len - sizeof(*hdr));
 
                    CPL_ETH_II : CPL_ETH_II_VLAN;
                tso_info |= V_LSO_ETH_TYPE(eth_type) |
                    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
-                   V_LSO_TCPHDR_WORDS(skb->h.th->doff);
+                   V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
                hdr->lso_info = htonl(tso_info);
                flits = 3;
        } else {
 
                        struct iphdr *iph = ip_hdr(skb);
                        iph->tot_len = 0;
                        iph->check = 0;
-                       skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
-                                                             iph->daddr, 0,
-                                                             IPPROTO_TCP, 0);
+                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                                iph->daddr, 0,
+                                                                IPPROTO_TCP,
+                                                                0);
                        cmd_length = E1000_TXD_CMD_IP;
                        ipcse = skb_transport_offset(skb) - 1;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        ipv6_hdr(skb)->payload_len = 0;
-                       skb->h.th->check =
+                       tcp_hdr(skb)->check =
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr,
                                                 0, IPPROTO_TCP, 0);
                ipcss = skb_network_offset(skb);
                ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
                tucss = skb_transport_offset(skb);
-               tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+               tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;
 
                cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
 
                }
                if (proto == IPPROTO_TCP) {
                        csoff += offsetof(struct tcphdr, check);
-                       skb->h.th->check = csum;
+                       tcp_hdr(skb)->check = csum;
                }
 
                w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
 
                iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
-               skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                     0, IPPROTO_TCP, 0);
+               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                        iph->daddr, 0,
+                                                        IPPROTO_TCP, 0);
                ipcss = skb_network_offset(skb);
                ipcso = (void *)&(iph->check) - (void *)skb->data;
                ipcse = skb_transport_offset(skb) - 1;
                tucss = skb_transport_offset(skb);
-               tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+               tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
                tucse = 0;
 
                i = adapter->tx_ring.next_to_use;
 
                        desc->l4i_chk = udp_hdr(skb)->check;
                        break;
                case IPPROTO_TCP:
-                       desc->l4i_chk = skb->h.th->check;
+                       desc->l4i_chk = tcp_hdr(skb)->check;
                        break;
                default:
                        BUG();
 
                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
 
-               skb->h.th->check = 0;
+               tcp_hdr(skb)->check = 0;
 
        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
-                       skb->h.th->check = 0;
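+                       /* HW TSO: the controller fills in the TCP checksum. */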
+                       tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
-               }
-               else {
-                       skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
-                                                             iph->daddr, 0,
-                                                             IPPROTO_TCP, 0);
-               }
+               } else {
+                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                                iph->daddr, 0,
+                                                                IPPROTO_TCP,
+                                                                0);
+               }
 
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
 
                        eddp->skb_offset += VLAN_HLEN;
 #endif /* CONFIG_QETH_VLAN */
        }
-       tcph = eddp->skb->h.th;
+       tcph = tcp_hdr(eddp->skb);
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
 
 
        hdr  = (struct qeth_hdr_tso *) skb->data;
        iph  = ip_hdr(skb);
-       tcph = skb->h.th;
+       tcph = tcp_hdr(skb);
        /*fix header to TSO values ...*/
        hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
        /*set values which are fix for the first approach ...*/
 {
        struct iphdr *iph    = ip_hdr(skb);
        struct ipv6hdr *ip6h = ipv6_hdr(skb);
-       struct tcphdr *tcph  = skb->h.th;
+       struct tcphdr *tcph  = tcp_hdr(skb);
 
        tcph->check = 0;
        if (skb->protocol == ETH_P_IPV6) {
 
        /* 4 byte hole on 64 bit*/
 
        union {
-               struct tcphdr   *th;
                struct iphdr    *ipiph;
                struct ipv6hdr  *ipv6h;
                unsigned char   *raw;
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
 
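+/*
+ * Return the TCP header of @skb; only valid once the transport header
+ * has been set (e.g. via skb_reset_transport_header()).
+ */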
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+       return (struct tcphdr *)skb_transport_header(skb);
+}
+
 static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
 {
-       return skb->h.th->doff * 4;
+       return tcp_hdr(skb)->doff * 4;
 }
 
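+/* TCP options, in bytes: doff counts 32-bit words, 5 are the fixed header. */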
 static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 {
-       return (skb->h.th->doff - 5) * 4;
+       return (tcp_hdr(skb)->doff - 5) * 4;
 }
 
 /* This defines a selective acknowledgement block. */
 
        ireq->wscale_ok = rx_opt->wscale_ok;
        ireq->acked = 0;
        ireq->ecn_ok = 0;
-       ireq->rmt_port = skb->h.th->source;
+       ireq->rmt_port = tcp_hdr(skb)->source;
 }
 
 extern void tcp_enter_memory_pressure(void);
 
                        INET_ECN_xmit(sk);
                        if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
                                tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
-                               skb->h.th->cwr = 1;
+                               tcp_hdr(skb)->cwr = 1;
                                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
                        }
                } else {
                        INET_ECN_dontxmit(sk);
                }
                if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
-                       skb->h.th->ece = 1;
+                       tcp_hdr(skb)->ece = 1;
        }
 }
 
 
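+/* A CWR from the peer answers our ECE: stop demanding one. */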
 static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       if (skb->h.th->cwr)
+       if (tcp_hdr(skb)->cwr)
                tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
 
                                                .tos = RT_TOS(ip_hdr(skb)->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
-                                              { .sport = skb->h.th->dest,
-                                                .dport = skb->h.th->source } },
+                                              { .sport = tcp_hdr(skb)->dest,
+                                                .dport = tcp_hdr(skb)->source } },
                                    .proto = sk->sk_protocol };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(&rt, &fl))
 
 __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       const struct iphdr *iph = ip_hdr(skb);
+       const struct tcphdr *th = tcp_hdr(skb);
        int mssind;
        const __u16 mss = *mssp;
 
-
        tp->last_synq_overflow = jiffies;
 
        /* XXX sort msstab[] by probability?  Binary search? */
 
        NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
 
-       return secure_tcp_syn_cookie(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-                                    skb->h.th->source, skb->h.th->dest,
-                                    ntohl(skb->h.th->seq),
+       return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
+                                    th->source, th->dest, ntohl(th->seq),
                                     jiffies / (HZ * 60), mssind);
 }
 
  */
 static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 {
-       __u32 seq;
-       __u32 mssind;
-
-       seq = ntohl(skb->h.th->seq)-1;
-       mssind = check_tcp_syn_cookie(cookie,
-                                     ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-                                     skb->h.th->source, skb->h.th->dest,
-                                     seq, jiffies / (HZ * 60), COUNTER_TRIES);
+       const struct iphdr *iph = ip_hdr(skb);
+       const struct tcphdr *th = tcp_hdr(skb);
+       __u32 seq = ntohl(th->seq) - 1;
+       __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
+                                           th->source, th->dest, seq,
+                                           jiffies / (HZ * 60),
+                                           COUNTER_TRIES);
 
        return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
 }
        struct inet_request_sock *ireq;
        struct tcp_request_sock *treq;
        struct tcp_sock *tp = tcp_sk(sk);
-       __u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
+       const struct tcphdr *th = tcp_hdr(skb);
+       __u32 cookie = ntohl(th->ack_seq) - 1;
        struct sock *ret = sk;
        struct request_sock *req;
        int mss;
        struct rtable *rt;
        __u8 rcv_wscale;
 
-       if (!sysctl_tcp_syncookies || !skb->h.th->ack)
+       if (!sysctl_tcp_syncookies || !th->ack)
                goto out;
 
        if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
        }
        ireq = inet_rsk(req);
        treq = tcp_rsk(req);
-       treq->rcv_isn           = ntohl(skb->h.th->seq) - 1;
+       treq->rcv_isn           = ntohl(th->seq) - 1;
        treq->snt_isn           = cookie;
        req->mss                = mss;
-       ireq->rmt_port          = skb->h.th->source;
+       ireq->rmt_port          = th->source;
        ireq->loc_addr          = ip_hdr(skb)->daddr;
        ireq->rmt_addr          = ip_hdr(skb)->saddr;
        ireq->opt               = NULL;
                                                .tos = RT_CONN_FLAGS(sk) } },
                                    .proto = IPPROTO_TCP,
                                    .uli_u = { .ports =
-                                              { .sport = skb->h.th->dest,
-                                                .dport = skb->h.th->source } } };
+                                              { .sport = th->dest,
+                                                .dport = th->source } } };
                security_req_classify_flow(req, &fl);
                if (ip_route_output_key(&rt, &fl)) {
                        reqsk_free(req);
 
                        /* Subtract 1, if FIN is in queue. */
                        if (answ && !skb_queue_empty(&sk->sk_receive_queue))
                                answ -=
-                      ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
+                      tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
                } else
                        answ = tp->urg_seq - tp->copied_seq;
                release_sock(sk);
 
        skb_queue_walk(&sk->sk_receive_queue, skb) {
                offset = seq - TCP_SKB_CB(skb)->seq;
-               if (skb->h.th->syn)
+               if (tcp_hdr(skb)->syn)
                        offset--;
-               if (offset < skb->len || skb->h.th->fin) {
+               if (offset < skb->len || tcp_hdr(skb)->fin) {
                        *off = offset;
                        return skb;
                }
                        if (offset != skb->len)
                                break;
                }
-               if (skb->h.th->fin) {
+               if (tcp_hdr(skb)->fin) {
                        sk_eat_skb(sk, skb, 0);
                        ++seq;
                        break;
                                break;
                        }
                        offset = *seq - TCP_SKB_CB(skb)->seq;
-                       if (skb->h.th->syn)
+                       if (tcp_hdr(skb)->syn)
                                offset--;
                        if (offset < skb->len)
                                goto found_ok_skb;
-                       if (skb->h.th->fin)
+                       if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
                        BUG_TRAP(flags & MSG_PEEK);
                        skb = skb->next;
                if (used + offset < skb->len)
                        continue;
 
-               if (skb->h.th->fin)
+               if (tcp_hdr(skb)->fin)
                        goto found_fin_ok;
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, copied_early);
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
-                         skb->h.th->fin;
+                         tcp_hdr(skb)->fin;
                data_was_unread += len;
                __kfree_skb(skb);
        }
        if (!pskb_may_pull(skb, sizeof(*th)))
                goto out;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;
        delta = htonl(oldlen + (thlen + len));
 
        skb = segs;
-       th = skb->h.th;
+       th = tcp_hdr(skb);
        seq = ntohl(th->seq);
 
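+       /* Walk the segments, giving each its own sequence number and
+        * clearing CWR on all but the first.
+        */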
        do {
 
                seq += len;
                skb = skb->next;
-               th = skb->h.th;
+               th = tcp_hdr(skb);
 
                th->seq = htonl(seq);
                th->cwr = 0;
 
                     * to handle super-low mtu links fairly.
                     */
                    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
-                    !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
+                    !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
                        /* Subtract also invariant (if peer is RFC compliant),
                         * tcp header plus fixed timestamp option length.
                         * Resulting "len" is MSS free of SACK jitter.
                                 struct sk_buff *skb, u32 ack, u32 ack_seq)
 {
        int flag = 0;
-       u32 nwin = ntohs(skb->h.th->window);
+       u32 nwin = ntohs(tcp_hdr(skb)->window);
 
-       if (likely(!skb->h.th->syn))
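+       /* The window field in a SYN segment is never scaled (RFC 1323). */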
+       if (likely(!tcp_hdr(skb)->syn))
                nwin <<= tp->rx_opt.snd_wscale;
 
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
                if (TCP_SKB_CB(skb)->sacked)
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
 
-               if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
+               if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
                        flag |= FLAG_ECE;
 
                tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
 {
        unsigned char *ptr;
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        int length=(th->doff*4)-sizeof(struct tcphdr);
 
        ptr = (unsigned char *)(th + 1);
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        u32 seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
                __skb_unlink(skb, &tp->out_of_order_queue);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if (skb->h.th->fin)
-                       tcp_fin(skb, sk, skb->h.th);
+               if (tcp_hdr(skb)->fin)
+                       tcp_fin(skb, sk, tcp_hdr(skb));
        }
 }
 
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct tcp_sock *tp = tcp_sk(sk);
        int eaten = -1;
 
                 * - bloated or contains data before "start" or
                 *   overlaps to the next one.
                 */
-               if (!skb->h.th->syn && !skb->h.th->fin &&
+               if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
                     before(TCP_SKB_CB(skb)->seq, start) ||
                     (skb->next != tail &&
                start = TCP_SKB_CB(skb)->end_seq;
                skb = skb->next;
        }
-       if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+       if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
                return;
 
        while (before(start, end)) {
                                __kfree_skb(skb);
                                NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
                                skb = next;
-                               if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+                               if (skb == tail ||
+                                   tcp_hdr(skb)->syn ||
+                                   tcp_hdr(skb)->fin)
                                        return;
                        }
                }
                tcp_rcv_space_adjust(sk);
 
                if ((tp->ucopy.len == 0) ||
-                   (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+                   (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
                    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
                        tp->ucopy.wakeup = 1;
                        sk->sk_data_ready(sk, 0);
 
 {
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
-                                         skb->h.th->dest,
-                                         skb->h.th->source);
+                                         tcp_hdr(skb)->dest,
+                                         tcp_hdr(skb)->source);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
        struct inet_sock *inet = inet_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(len, inet->saddr,
                return -EINVAL;
 
        iph = ip_hdr(skb);
-       th = skb->h.th;
+       th = tcp_hdr(skb);
 
        th->check = 0;
        th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
                            struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
        skb = tcp_make_synack(sk, dst, req);
 
        if (skb) {
-               struct tcphdr *th = skb->h.th;
+               struct tcphdr *th = tcp_hdr(skb);
 
                th->check = tcp_v4_check(skb->len,
                                         ireq->loc_addr,
                warntime = jiffies;
                printk(KERN_INFO
                       "possible SYN flooding on port %d. Sending cookies.\n",
-                      ntohs(skb->h.th->dest));
+                      ntohs(tcp_hdr(skb)->dest));
        }
 }
 #endif
        __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct iphdr *iph = ip_hdr(skb);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        int length = (th->doff << 2) - sizeof(struct tcphdr);
        int genhash;
        unsigned char *ptr;
        ireq->rmt_addr = saddr;
        ireq->opt = tcp_v4_save_options(sk, skb);
        if (!want_cookie)
-               TCP_ECN_create_request(req, skb->h.th);
+               TCP_ECN_create_request(req, tcp_hdr(skb));
 
        if (want_cookie) {
 #ifdef CONFIG_SYN_COOKIES
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
                                       "request from %u.%u.%u.%u/%u\n",
                                       NIPQUAD(saddr),
-                                      ntohs(skb->h.th->source));
+                                      ntohs(tcp_hdr(skb)->source));
                        dst_release(dst);
                        goto drop_and_free;
                }
 
 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
-               if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+               if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
                        rsk = sk;
                        goto reset;
                }
        }
 
        TCP_CHECK_TIMER(sk);
-       if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+       if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
                rsk = sk;
                goto reset;
        }
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
 
        if (th->doff < sizeof(struct tcphdr) / 4)
                goto bad_packet;
             tcp_v4_checksum_init(skb)))
                goto bad_packet;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
        iph = ip_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 
                        newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                        newtp->window_clamp = min(newtp->window_clamp, 65535U);
                }
-               newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
+               newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
+                                 newtp->rx_opt.snd_wscale);
                newtp->max_window = newtp->snd_wnd;
 
                if (newtp->rx_opt.tstamp_ok) {
                           struct request_sock *req,
                           struct request_sock **prev)
 {
-       struct tcphdr *th = skb->h.th;
+       const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        int paws_reject = 0;
        struct tcp_options_received tmp_opt;
        int state = child->sk_state;
 
        if (!sock_owned_by_user(child)) {
-               ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
-
+               ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
+                                           skb->len);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent, 0);
 
                tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
-       th = (struct tcphdr *) skb_push(skb, tcp_header_size);
-       skb->h.th = th;
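+       /* Make room for the TCP header and mark it as the transport
+        * header so tcp_hdr() can find it below.
+        */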
+       skb_push(skb, tcp_header_size);
+       skb_reset_transport_header(skb);
        skb_set_owner_w(skb, sk);
 
        /* Build TCP header and checksum it. */
+       th = tcp_hdr(skb);
        th->source              = inet->sport;
        th->dest                = inet->dport;
        th->seq                 = htonl(tcb->seq);
                tp->af_specific->calc_md5_hash(md5_hash_location,
                                               md5,
                                               sk, NULL, NULL,
-                                              skb->h.th,
+                                              tcp_hdr(skb),
                                               sk->sk_protocol,
                                               skb->len);
        }
        if (md5)
                tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-       skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
+       skb_push(skb, tcp_header_size);
+       skb_reset_transport_header(skb);
 
+       th = tcp_hdr(skb);
        memset(th, 0, sizeof(struct tcphdr));
        th->syn = 1;
        th->ack = 1;
                tp->af_specific->calc_md5_hash(md5_hash_location,
                                               md5,
                                               NULL, dst, req,
-                                              skb->h.th, sk->sk_protocol,
+                                              tcp_hdr(skb), sk->sk_protocol,
                                               skb->len);
        }
 #endif
 
 {
        return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
                                            ipv6_hdr(skb)->saddr.s6_addr32,
-                                           skb->h.th->dest,
-                                           skb->h.th->source);
+                                           tcp_hdr(skb)->dest,
+                                           tcp_hdr(skb)->source);
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        skb = tcp_make_synack(sk, dst, req);
        if (skb) {
-               struct tcphdr *th = skb->h.th;
+               struct tcphdr *th = tcp_hdr(skb);
 
                th->check = tcp_v6_check(th, skb->len,
                                         &treq->loc_addr, &treq->rmt_addr,
        __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        struct ipv6hdr *ip6h = ipv6_hdr(skb);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        int length = (th->doff << 2) - sizeof (*th);
        int genhash;
        u8 *ptr;
 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
                return -EINVAL;
 
        ipv6h = ipv6_hdr(skb);
-       th = skb->h.th;
+       th = tcp_hdr(skb);
 
        th->check = 0;
        th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
 
 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th, *t1;
+       struct tcphdr *th = tcp_hdr(skb), *t1;
        struct sk_buff *buff;
        struct flowi fl;
        int tot_len = sizeof(*th);
 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
                            struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
 {
-       struct tcphdr *th = skb->h.th, *t1;
+       struct tcphdr *th = tcp_hdr(skb), *t1;
        struct sk_buff *buff;
        struct flowi fl;
        int tot_len = sizeof(struct tcphdr);
 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
        struct request_sock *req, **prev;
-       const struct tcphdr *th = skb->h.th;
+       const struct tcphdr *th = tcp_hdr(skb);
        struct sock *nsk;
 
        /* Find possible connection requests. */
        treq = inet6_rsk(req);
        ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
-       TCP_ECN_create_request(req, skb->h.th);
+       TCP_ECN_create_request(req, tcp_hdr(skb));
        treq->pktopts = NULL;
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
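+       /* With CHECKSUM_COMPLETE the device summed the whole packet:
+        * fold in the pseudo-header and accept if it verifies; else
+        * seed skb->csum for full software verification.
+        */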
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!tcp_v6_check(skb->h.th, skb->len, &ipv6_hdr(skb)->saddr,
+               if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
                                  &ipv6_hdr(skb)->daddr, skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
                }
        }
 
-       skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th, skb->len,
+       skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
                                              &ipv6_hdr(skb)->saddr,
                                              &ipv6_hdr(skb)->daddr, 0));
 
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
-               if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+               if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
                        goto reset;
                TCP_CHECK_TIMER(sk);
                if (opt_skb)
        }
 
        TCP_CHECK_TIMER(sk);
-       if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+       if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
                goto reset;
        TCP_CHECK_TIMER(sk);
        if (opt_skb)
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
 
        if (th->doff < sizeof(struct tcphdr)/4)
                goto bad_packet;
             tcp_v6_checksum_init(skb)))
                goto bad_packet;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff*4);