[TCP]: Limit processing lost_retrans loop to work-to-do cases
author     Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
           Fri, 12 Oct 2007 00:36:13 +0000 (17:36 -0700)
committer  David S. Miller <davem@davemloft.net>
           Fri, 12 Oct 2007 00:36:13 +0000 (17:36 -0700)
This addition of lost_retrans_low to tcp_sock might be
unnecessary; it is not clear how often the lost_retrans worker
is executed when there is no work to do.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
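
For readers skimming the diff below, here is a minimal, self-contained sketch
of the comparison behind the new gate in tcp_sacktag_write_queue(): the write
queue is walked only when the highest SACKed sequence has passed
tp->lost_retrans_low. This is plain C with made-up struct, function names and
values, not kernel code; before()/after() mirror the kernel's wrap-safe
sequence helpers, and the CA-state check is omitted.

/*
 * Minimal sketch of the new gate (illustrative names, not kernel code):
 * only walk the write queue when the highest SACKed sequence has passed
 * the lowest snd_nxt recorded for a retransmission still in flight.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Wrap-safe sequence comparison, same idea as the kernel helpers. */
static int before(u32 seq1, u32 seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)       before(seq1, seq2)

struct tp_sketch {
        u32 retrans_out;        /* retransmitted segments still in flight */
        u32 lost_retrans_low;   /* lowest snd_nxt among those segments */
};

/* Could tcp_mark_lost_retrans() have work to do for this ACK?
 * (The icsk_ca_state == TCP_CA_Recovery test is left out here.) */
static int lost_retrans_work_possible(const struct tp_sketch *tp,
                                      u32 highest_sack_end_seq)
{
        return tp->retrans_out &&
               after(highest_sack_end_seq, tp->lost_retrans_low);
}

int main(void)
{
        struct tp_sketch tp = { .retrans_out = 2, .lost_retrans_low = 3000 };

        printf("%d\n", lost_retrans_work_possible(&tp, 2500));  /* 0: skip */
        printf("%d\n", lost_retrans_work_possible(&tp, 3500));  /* 1: walk */
        return 0;
}
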
include/linux/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 9ff456e8d6c7f0d8bd6dfbdfdadba9f8279490b6..c5b94c1a5ee288885e639182a34484aa090ceeb1 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -348,6 +348,8 @@ struct tcp_sock {
        int     lost_cnt_hint;
        int     retransmit_cnt_hint;
 
+       u32     lost_retrans_low;       /* Sent seq after any rxmit (lowest) */
+
        u16     advmss;         /* Advertised MSS                       */
        u16     prior_ssthresh; /* ssthresh saved at recovery start     */
        u32     lost_out;       /* Lost packets                 */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d5e0fcc22a3be1ff1435bf84accfc81db4b0f6d1..0a42e934034678bfd57cd2936e7700e4ea1ebd1c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1112,7 +1112,8 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
  *
  * Search retransmitted skbs from write_queue that were sent when snd_nxt was
  * less than what is now known to be received by the other end (derived from
- * SACK blocks by the caller).
+ * SACK blocks by the caller). Also calculate the lowest snd_nxt among the
+ * remaining retransmitted skbs to avoid some costly processing per ACKs.
  */
 static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
 {
@@ -1120,6 +1121,7 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
        struct sk_buff *skb;
        int flag = 0;
        int cnt = 0;
+       u32 new_low_seq = 0;
 
        tcp_for_write_queue(skb, sk) {
                u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
@@ -1151,9 +1153,15 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
                                NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
                        }
                } else {
+                       if (!new_low_seq || before(ack_seq, new_low_seq))
+                               new_low_seq = ack_seq;
                        cnt += tcp_skb_pcount(skb);
                }
        }
+
+       if (tp->retrans_out)
+               tp->lost_retrans_low = new_low_seq;
+
        return flag;
 }
 
@@ -1481,8 +1489,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                }
        }
 
-       if (tp->retrans_out && highest_sack_end_seq &&
-           after(highest_sack_end_seq, tp->high_seq) &&
+       if (tp->retrans_out &&
+           after(highest_sack_end_seq, tp->lost_retrans_low) &&
            icsk->icsk_ca_state == TCP_CA_Recovery)
                flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq);
 
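As a hedged illustration of the bookkeeping added above (same assumed
semantics as the hunk, with an array standing in for the write queue): every
skb that keeps its retransmit mark contributes its ack_seq, the snd_nxt at
retransmit time, and the minimum becomes tp->lost_retrans_low. If no SACK
block reaches past that minimum, no remaining retransmission can yet be
marked lost, so later ACKs can skip the walk.

/*
 * Sketch of the new_low_seq bookkeeping (illustrative values only): keep
 * the lowest ack_seq among segments that stay marked as retransmitted.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

static int before(u32 seq1, u32 seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        /* ack_seq (snd_nxt at retransmit time) of skbs that keep their
         * TCPCB_SACKED_RETRANS mark after the walk. */
        u32 remaining[] = { 4200, 3900, 5100 };
        u32 new_low_seq = 0;    /* 0 used as "not yet set", as in the hunk */
        unsigned int i;

        for (i = 0; i < sizeof(remaining) / sizeof(remaining[0]); i++) {
                if (!new_low_seq || before(remaining[i], new_low_seq))
                        new_low_seq = remaining[i];
        }

        printf("lost_retrans_low becomes %u\n", new_low_seq);   /* 3900 */
        return 0;
}
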
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 53296753b0bd432f2e4689ac011dc0b4427f9b16..324b4207254ae411728dd8a3cc1740f5ac0ce267 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1914,6 +1914,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                                printk(KERN_DEBUG "retrans_out leaked.\n");
                }
 #endif
+               if (!tp->retrans_out)
+                       tp->lost_retrans_low = tp->snd_nxt;
                TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
                tp->retrans_out += tcp_skb_pcount(skb);
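
Finally, a short sketch of the tcp_output.c side, with made-up names and
values: while retrans_out is zero, the next retransmission seeds
tp->lost_retrans_low with the current snd_nxt, i.e. the ack_seq stamped on
that first retransmission and a lower bound for any later ones;
tcp_mark_lost_retrans() then keeps the value up to date as shown earlier.

/*
 * Sketch of the seeding done in tcp_retransmit_skb() above (illustrative
 * names and values): the first retransmission after retrans_out drops to
 * zero records the current snd_nxt as the new lower bound.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct tp_sketch {
        u32 snd_nxt;            /* next sequence to be sent */
        u32 retrans_out;        /* retransmitted segments in flight */
        u32 lost_retrans_low;   /* lower bound maintained by this patch */
};

static void account_retransmit(struct tp_sketch *tp, u32 pcount)
{
        if (!tp->retrans_out)
                tp->lost_retrans_low = tp->snd_nxt;
        tp->retrans_out += pcount;
}

int main(void)
{
        struct tp_sketch tp = { .snd_nxt = 7000 };

        account_retransmit(&tp, 1);     /* seeds lost_retrans_low = 7000 */
        account_retransmit(&tp, 1);     /* no reseed while retrans_out != 0 */
        printf("lost_retrans_low=%u retrans_out=%u\n",
               tp.lost_retrans_low, tp.retrans_out);
        return 0;
}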