#define NETIF_F_TSO            (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_UFO            (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_GSO_ROBUST     (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
+#define NETIF_F_TSO_ECN                (SKB_GSO_TCPV4_ECN << NETIF_F_GSO_SHIFT)
 
 #define NETIF_F_GEN_CSUM       (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 #define NETIF_F_ALL_CSUM       (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
 
 
        /* This indicates the skb is from an untrusted source. */
        SKB_GSO_DODGY = 1 << 2,
+
+       /* This indicates the TCP segment has the CWR bit set. */
+       SKB_GSO_TCPV4_ECN = 1 << 3,
 };
 
 /** 
 
        SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
        SOCK_DBG, /* %SO_DEBUG setting */
        SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
-       SOCK_NO_LARGESEND, /* whether to sent large segments or not */
        SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
        SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
 };
        if (sk->sk_route_caps & NETIF_F_GSO)
                sk->sk_route_caps |= NETIF_F_TSO;
        if (sk->sk_route_caps & NETIF_F_TSO) {
-               if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
+               if (dst->header_len)
                        sk->sk_route_caps &= ~NETIF_F_TSO;
                else 
                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 
                                    struct sk_buff *skb)
 {
        tp->ecn_flags = 0;
-       if (sysctl_tcp_ecn && !(sk->sk_route_caps & NETIF_F_TSO)) {
+       if (sysctl_tcp_ecn) {
                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
                tp->ecn_flags = TCP_ECN_OK;
-               sock_set_flag(sk, SOCK_NO_LARGESEND);
        }
 }
 
                        if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
                                tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
                                skb->h.th->cwr = 1;
+                               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+                                       skb_shinfo(skb)->gso_type |=
+                                               SKB_GSO_TCPV4_ECN;
                        }
                } else {
                        /* ACK or retransmitted segment: clear ECT|CE */
 
                 */
 
                TCP_ECN_rcv_synack(tp, th);
-               if (tp->ecn_flags&TCP_ECN_OK)
-                       sock_set_flag(sk, SOCK_NO_LARGESEND);
 
                tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
                tcp_ack(sk, skb, FLAG_SLOWPATH);
                tp->max_window = tp->snd_wnd;
 
                TCP_ECN_rcv_syn(tp, th);
-               if (tp->ecn_flags&TCP_ECN_OK)
-                       sock_set_flag(sk, SOCK_NO_LARGESEND);
 
                tcp_mtup_init(sk);
                tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
 
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
-               if (newtp->ecn_flags&TCP_ECN_OK)
-                       sock_set_flag(newsk, SOCK_NO_LARGESEND);
 
                TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
        }
 
        memset(th, 0, sizeof(struct tcphdr));
        th->syn = 1;
        th->ack = 1;
-       if (dst->dev->features&NETIF_F_TSO)
-               ireq->ecn_ok = 0;
        TCP_ECN_make_synack(req, th);
        th->source = inet_sk(sk)->sport;
        th->dest = ireq->rmt_port;