While looking at an oprofile run, I noticed a cache miss in tcp_rcv_established() when reading copied_seq.
ffffffff80400a80 <tcp_rcv_established>: /* tcp_rcv_established total: 4034293  2.0400 */
 55493  0.0281 :ffffffff80400bc9:   mov    0x4c8(%r12),%eax    copied_seq
543103  0.2746 :ffffffff80400bd1:   cmp    0x3e0(%r12),%eax    rcv_nxt
if (tp->copied_seq == tp->rcv_nxt &&
        len - tcp_header_len <= tp->ucopy.len) {
In this function, the cache line 0x4c0 -> 0x500 is touched only to read the 'copied_seq' field.
rcv_wup and copied_seq should sit next to the rcv_nxt field, to lower the number of
active cache lines in hot paths (tcp_rcv_established(), tcp_poll(), ...).
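For reference, a quick way to see which cache line a field lands on is to print its
offset with offsetof(). The sketch below is a minimal userspace illustration only;
the demo struct, its offsets and the 64-byte line size are assumptions, not the real
tcp_sock layout:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for tcp_sock: the point is only that fields
 * sharing a 64-byte cache line are fetched by a single miss. */
struct demo_sock {
	char		other_state[0x3e0];	/* unrelated fields before rcv_nxt */
	unsigned int	rcv_nxt;		/* at 0x3e0, as in the profile above */
	unsigned int	copied_seq;		/* adjacent after the reordering */
	unsigned int	rcv_wup;
};

#define CACHE_LINE_SIZE	64	/* assumed line size */

static void show(const char *name, size_t off)
{
	printf("%-12s offset 0x%03zx -> cache line %zu\n",
	       name, off, off / CACHE_LINE_SIZE);
}

int main(void)
{
	show("rcv_nxt",    offsetof(struct demo_sock, rcv_nxt));
	show("copied_seq", offsetof(struct demo_sock, copied_seq));
	show("rcv_wup",    offsetof(struct demo_sock, rcv_wup));
	return 0;
}

With the old layout, copied_seq sat at 0x4c8 (line 0x4c0) while rcv_nxt was at 0x3e0,
so the comparison shown above needed two cache lines; after the move all three fields
fall into the same line.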
As you suggested, I changed tcp_create_openreq_child() so that these fields
are written together, to avoid adding a new store buffer stall.
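The tcp_minisocks.c hunk below is the real change; as a schematic of the idea only
(hypothetical struct and names, not kernel code):

/* Schematic: once rcv_nxt, copied_seq and rcv_wup are adjacent, initializing
 * them back to back lets the three stores drain into a single dirty cache
 * line instead of touching lines spread across the structure. */
struct rx_state {
	unsigned int	rcv_nxt;
	unsigned int	copied_seq;
	unsigned int	rcv_wup;
};

static inline void rx_state_init(struct rx_state *rx, unsigned int rcv_isn)
{
	rx->rcv_wup = rx->copied_seq = rx->rcv_nxt = rcv_isn + 1;
}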
The patch is 64-bit friendly (no new hole is introduced by alignment constraints).
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
  *     See RFC793 and RFC1122. The RFC writes these in capitals.
  */
        u32     rcv_nxt;        /* What we want to receive next         */
+       u32     copied_seq;     /* Head of yet unread data              */
+       u32     rcv_wup;        /* rcv_nxt on last window update sent   */
        u32     snd_nxt;        /* Next sequence we send                */
 
        u32     snd_una;        /* First byte we want an ack for        */
        struct sk_buff_head     out_of_order_queue; /* Out of order segments go here */
 
        u32     rcv_wnd;        /* Current receiver window              */
-       u32     rcv_wup;        /* rcv_nxt on last window update sent   */
        u32     write_seq;      /* Tail(+1) of data held in tcp send buffer */
        u32     pushed_seq;     /* Last pushed seq, required to talk to windows */
-       u32     copied_seq;     /* Head of yet unread data              */
 
 /*     SACKs data      */
        struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
 
                /* Now setup tcp_sock */
                newtp = tcp_sk(newsk);
                newtp->pred_flags = 0;
-               newtp->rcv_nxt = treq->rcv_isn + 1;
-               newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;
+               newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
+               newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
 
                tcp_prequeue_init(newtp);
 
                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
-               newtp->rcv_wup = treq->rcv_isn + 1;
                newtp->write_seq = treq->snt_isn + 1;
                newtp->pushed_seq = newtp->write_seq;
-               newtp->copied_seq = treq->rcv_isn + 1;
 
                newtp->rx_opt.saw_tstamp = 0;