sched: Create a helper function to calculate imbalance
author    Gautham R Shenoy <ego@in.ibm.com>
          Wed, 25 Mar 2009 09:14:12 +0000 (14:44 +0530)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 25 Mar 2009 09:30:47 +0000 (10:30 +0100)
Move all the imbalance calculation out of find_busiest_group()
through this helper function.

With this change, the structure of find_busiest_group() will be
as follows (a condensed sketch follows the list):

- update_sched_domain_statistics.

- check if imbalance exists.

- update imbalance and return busiest.
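
For illustration, the resulting function takes roughly the following
shape. This is a condensed sketch, not the verbatim code: the
statistics helper is assumed to be named update_sd_lb_stats() (with
the signature used in this patch series), and the various
balanced-exit checks are collapsed into the single test visible in
the diff below.

static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
		   unsigned long *imbalance, enum cpu_idle_type idle,
		   int *sd_idle, const struct cpumask *cpus, int *balance)
{
	struct sd_lb_stats sds;

	memset(&sds, 0, sizeof(sds));

	/* 1. Update the sched_domain statistics. */
	update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus, balance, &sds);

	/* 2. Check whether an actionable imbalance exists. */
	if (!sds.busiest || sds.max_load <= sds.busiest_load_per_task)
		goto out_balanced;

	/* 3. Compute the imbalance and return the busiest group. */
	calculate_imbalance(&sds, this_cpu, imbalance);
	return sds.busiest;

out_balanced:
	*imbalance = 0;
	return NULL;
}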

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091411.13992.43293.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c

index 540147e5e82b556e2fa752897270218136aa912e..934f615cccebeb5044455dcdfd0b0e9d57221b92 100644
@@ -3487,8 +3487,8 @@ group_next:
 
 /**
  * fix_small_imbalance - Calculate the minor imbalance that exists
- *                     amongst the groups of a sched_domain, during
- *                     load balancing.
+ *                     amongst the groups of a sched_domain, during
+ *                     load balancing.
  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
  * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
  * @imbalance: Variable to store the imbalance.
@@ -3549,6 +3549,47 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
        if (pwr_move > pwr_now)
                *imbalance = sds->busiest_load_per_task;
 }
+
+/**
+ * calculate_imbalance - Calculate the amount of imbalance present within the
+ *                      groups of a given sched_domain during load balancing.
+ * @sds: statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: The cpu for which load balancing is currently being performed.
+ * @imbalance: The variable to store the imbalance.
+ */
+static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+               unsigned long *imbalance)
+{
+       unsigned long max_pull;
+       /*
+        * In the presence of smp nice balancing, certain scenarios can have
+        * max load less than avg load (as we skip groups at or below
+        * their cpu_power while calculating max_load).
+        */
+       if (sds->max_load < sds->avg_load) {
+               *imbalance = 0;
+               return fix_small_imbalance(sds, this_cpu, imbalance);
+       }
+
+       /* Don't want to pull so many tasks that a group would go idle */
+       max_pull = min(sds->max_load - sds->avg_load,
+                       sds->max_load - sds->busiest_load_per_task);
+
+       /* How much load to actually move to equalise the imbalance */
+       *imbalance = min(max_pull * sds->busiest->__cpu_power,
+               (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+                       / SCHED_LOAD_SCALE;
+
+       /*
+        * If *imbalance is less than the average load per runnable task
+        * there is no guarantee that any tasks will be moved, so we'll
+        * have a think about bumping its value to force at least one
+        * task to be moved.
+        */
+       if (*imbalance < sds->busiest_load_per_task)
+               return fix_small_imbalance(sds, this_cpu, imbalance);
+
+}
 /******* find_busiest_group() helpers end here *********************/
 
 /*
@@ -3562,7 +3603,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                   int *sd_idle, const struct cpumask *cpus, int *balance)
 {
        struct sd_lb_stats sds;
-       unsigned long max_pull;
 
        memset(&sds, 0, sizeof(sds));
 
@@ -3605,36 +3645,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
        if (sds.max_load <= sds.busiest_load_per_task)
                goto out_balanced;
 
-       /*
-        * In the presence of smp nice balancing, certain scenarios can have
-        * max load less than avg load(as we skip the groups at or below
-        * its cpu_power, while calculating max_load..)
-        */
-       if (sds.max_load < sds.avg_load) {
-               *imbalance = 0;
-               fix_small_imbalance(&sds, this_cpu, imbalance);
-               goto ret_busiest;
-       }
-
-       /* Don't want to pull so many tasks that a group would go idle */
-       max_pull = min(sds.max_load - sds.avg_load,
-                       sds.max_load - sds.busiest_load_per_task);
-
-       /* How much load to actually move to equalise the imbalance */
-       *imbalance = min(max_pull * sds.busiest->__cpu_power,
-                       (sds.avg_load - sds.this_load) * sds.this->__cpu_power)
-                       / SCHED_LOAD_SCALE;
-
-       /*
-        * if *imbalance is less than the average load per runnable task
-        * there is no gaurantee that any tasks will be moved so we'll have
-        * a think about bumping its value to force at least one task to be
-        * moved
-        */
-       if (*imbalance < sds.busiest_load_per_task)
-               fix_small_imbalance(&sds, this_cpu, imbalance);
-
-ret_busiest:
+       /* Looks like there is an imbalance. Compute it */
+       calculate_imbalance(&sds, this_cpu, imbalance);
        return sds.busiest;
 
 out_balanced:
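
A worked example of calculate_imbalance() with hypothetical numbers
(assuming SCHED_LOAD_SCALE = 1024 and both groups at a __cpu_power of
1024):

	sds->max_load = 3072, sds->avg_load = 2048,
	sds->this_load = 1024, sds->busiest_load_per_task = 1024

	max_pull   = min(3072 - 2048, 3072 - 1024) = 1024
	*imbalance = min(1024 * 1024, (2048 - 1024) * 1024) / 1024 = 1024

Here *imbalance (1024) is not below busiest_load_per_task (1024), so
fix_small_imbalance() is not invoked and roughly one task's worth of
load is moved towards this_cpu.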