sched: fix typo in the FAIR_GROUP_SCHED branch
author    Ingo Molnar <mingo@elte.hu>  Fri, 10 Aug 2007 21:05:11 +0000 (23:05 +0200)
committer Ingo Molnar <mingo@elte.hu>  Fri, 10 Aug 2007 21:05:11 +0000 (23:05 +0200)
while there's no in-tree way to turn on group scheduling at the moment,
fix a typo in it nevertheless.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index e91db32cadfd63538d4977eb705a0fe142e51353..c5af38948a1e8c16fda75ddb95b65d5b22e1387a 100644
@@ -959,13 +959,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
                struct cfs_rq *this_cfs_rq;
-               long imbalances;
+               long imbalance;
                unsigned long maxload;
 
                this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
 
-               imbalance = busy_cfs_rq->load.weight -
-                                                this_cfs_rq->load.weight;
+               imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
                /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
                if (imbalance <= 0)
                        continue;
@@ -976,7 +975,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
                *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
 #else
-#define maxload rem_load_move
+# define maxload rem_load_move
 #endif
                /* pass busy_cfs_rq argument into
                 * load_balance_[start|next]_fair iterators
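
For readers skimming the patch: the CONFIG_FAIR_GROUP_SCHED branch it touches decides, per task group, whether the local CPU should pull load from the busiest CPU at all. It compares the weight of the busiest CPU's per-group cfs_rq against the local CPU's cfs_rq for the same group and skips the group when the local side is already at least as loaded. The stand-alone sketch below mirrors that check outside the kernel; struct demo_cfs_rq and worth_pulling() are illustrative stand-ins for struct cfs_rq and the in-kernel logic, not actual kernel interfaces.

/* Illustrative user-space sketch of the per-group pull decision.
 * demo_cfs_rq stands in for struct cfs_rq; only the load weight matters here. */
#include <stdio.h>

struct demo_cfs_rq {
        unsigned long load_weight;      /* stands in for cfs_rq->load.weight */
};

/* Return nonzero when it is worth pulling tasks from @busy into @local,
 * mirroring the "imbalance <= 0 -> continue" check in load_balance_fair(). */
static int worth_pulling(const struct demo_cfs_rq *busy,
                         const struct demo_cfs_rq *local)
{
        long imbalance = (long)busy->load_weight - (long)local->load_weight;

        /* Don't pull if the local group carries at least as much load as the busy one. */
        return imbalance > 0;
}

int main(void)
{
        struct demo_cfs_rq busy  = { .load_weight = 3072 };
        struct demo_cfs_rq local = { .load_weight = 1024 };

        printf("pull from busy group? %s\n",
               worth_pulling(&busy, &local) ? "yes" : "no");
        return 0;
}

Note that the declaration typo (imbalances vs. imbalance) only matters when CONFIG_FAIR_GROUP_SCHED is enabled; with the option off, the #else branch aliases maxload to rem_load_move and the group-scheduling code is not compiled at all, which is presumably why the typo had gone unnoticed.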