www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
sched: incremental effective_load()
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Fri, 27 Jun 2008 11:41:38 +0000 (13:41 +0200)
committer Ingo Molnar <mingo@elte.hu>
Fri, 27 Jun 2008 12:31:47 +0000 (14:31 +0200)
Increase the accuracy of the effective_load values.

Not only consider the current increment (as per the attempted wakeup), but
also consider the delta between when we last adjusted the shares and the
current situation.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c
kernel/sched_fair.c

index 01d3e51b7116142d4b3165213eeeacb30378c864..7613f69f0978ed4b2b9fdcd4ce55b970212e0393 100644 (file)
@@ -427,6 +427,11 @@ struct cfs_rq {
         * this cpu's part of tg->shares
         */
        unsigned long shares;
+
+       /*
+        * load.weight at the time we set shares
+        */
+       unsigned long rq_weight;
 #endif
 #endif
 };
@@ -1527,6 +1532,7 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
         * record the actual number of shares, not the boosted amount.
         */
        tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+       tg->cfs_rq[cpu]->rq_weight = rq_weight;
 
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
index bed2f71e63d9168318b1143326638ae43dad3aba..e87f1a52f625ce7304103d3f60e8ee1adc532079 100644 (file)
@@ -1074,10 +1074,22 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long effective_load(struct task_group *tg, int cpu,
-               unsigned long wl, unsigned long wg)
+static long effective_load(struct task_group *tg, int cpu,
+               long wl, long wg)
 {
        struct sched_entity *se = tg->se[cpu];
+       long more_w;
+
+       if (!tg->parent)
+               return wl;
+
+       /*
+        * Instead of using this increment, also add the difference
+        * between when the shares were last updated and now.
+        */
+       more_w = se->my_q->load.weight - se->my_q->rq_weight;
+       wl += more_w;
+       wg += more_w;
 
        for_each_sched_entity(se) {
 #define D(n) (likely(n) ? (n) : 1)
@@ -1086,7 +1098,7 @@ static unsigned long effective_load(struct task_group *tg, int cpu,
 
                S = se->my_q->tg->shares;
                s = se->my_q->shares;
-               rw = se->my_q->load.weight;
+               rw = se->my_q->rq_weight;
 
                a = S*(rw + wl);
                b = S*rw + s*wg;