sched: fix SMP migration latencies
diff --git a/kernel/sched.c b/kernel/sched.c
index bf85b4b281c5f8d94641d46b49d0f8d57fa0dc0f..4ad789d268fe4959d7a24789f06a0c711af516a1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -171,21 +171,13 @@ struct rt_prio_array {
        struct list_head queue[MAX_RT_PRIO];
 };
 
-struct load_stat {
-       struct load_weight load;
-};
-
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
        struct load_weight load;
        unsigned long nr_running;
 
-       s64 fair_clock;
        u64 exec_clock;
        u64 min_vruntime;
-       s64 wait_runtime;
-       u64 sleeper_bonus;
-       unsigned long wait_runtime_overruns, wait_runtime_underruns;
 
        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;
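
The hunk above strips the old fair_clock/wait_runtime bookkeeping out of cfs_rq; ordering is now carried entirely by each entity's vruntime, with min_vruntime acting as a monotonic per-queue floor. A minimal sketch of how such a floor is typically advanced (hypothetical helper name, not code from this patch; it assumes the usual se->run_node rbtree linkage):

static void advance_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);
		vruntime = se->vruntime;
	}

	/* signed compare so the floor only ever moves forward */
	if ((s64)(vruntime - cfs_rq->min_vruntime) > 0)
		cfs_rq->min_vruntime = vruntime;
}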
@@ -236,7 +228,7 @@ struct rq {
 #ifdef CONFIG_NO_HZ
        unsigned char in_nohz_recently;
 #endif
-       struct load_stat ls;    /* capture load from *all* tasks on this cpu */
+       struct load_weight load;        /* capture load from *all* tasks on this cpu */
        unsigned long nr_load_updates;
        u64 nr_switches;
 
@@ -394,19 +386,17 @@ static void update_rq_clock(struct rq *rq)
  * Debugging: various feature bits
  */
 enum {
-       SCHED_FEAT_FAIR_SLEEPERS        = 1,
-       SCHED_FEAT_NEW_FAIR_SLEEPERS    = 2,
-       SCHED_FEAT_SLEEPER_AVG          = 4,
-       SCHED_FEAT_SLEEPER_LOAD_AVG     = 8,
-       SCHED_FEAT_START_DEBIT          = 16,
+       SCHED_FEAT_NEW_FAIR_SLEEPERS    = 1,
+       SCHED_FEAT_START_DEBIT          = 2,
+       SCHED_FEAT_USE_TREE_AVG         = 4,
+       SCHED_FEAT_APPROX_AVG           = 8,
 };
 
 const_debug unsigned int sysctl_sched_features =
-               SCHED_FEAT_FAIR_SLEEPERS        *0 |
                SCHED_FEAT_NEW_FAIR_SLEEPERS    *1 |
-               SCHED_FEAT_SLEEPER_AVG          *0 |
-               SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
-               SCHED_FEAT_START_DEBIT          *1;
+               SCHED_FEAT_START_DEBIT          *1 |
+               SCHED_FEAT_USE_TREE_AVG         *0 |
+               SCHED_FEAT_APPROX_AVG           *0;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
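
Each SCHED_FEAT_* constant is a distinct power of two, the default mask is spelled as a sum of "bit *0" / "bit *1" terms so every feature's setting stays visible at its definition site, and sched_feat() tests one bit at runtime. The same pattern in a self-contained user-space miniature (all names illustrative):

#include <stdio.h>

/* Miniature of the kernel's feature-bit pattern: one bit per
 * feature, defaults written as *0/*1 terms, tested via a macro. */
enum {
	FEAT_NEW_FAIR_SLEEPERS	= 1,
	FEAT_START_DEBIT	= 2,
	FEAT_USE_TREE_AVG	= 4,
	FEAT_APPROX_AVG		= 8,
};

static const unsigned int features =
		FEAT_NEW_FAIR_SLEEPERS	*1 |
		FEAT_START_DEBIT	*1 |
		FEAT_USE_TREE_AVG	*0 |
		FEAT_APPROX_AVG		*0;

#define feat(x) (features & FEAT_##x)

int main(void)
{
	printf("START_DEBIT is %s\n", feat(START_DEBIT) ? "on" : "off");
	printf("APPROX_AVG is %s\n", feat(APPROX_AVG) ? "on" : "off");
	return 0;
}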
 
@@ -673,19 +663,6 @@ static inline void resched_task(struct task_struct *p)
 }
 #endif
 
-static u64 div64_likely32(u64 divident, unsigned long divisor)
-{
-#if BITS_PER_LONG == 32
-       if (likely(divident <= 0xffffffffULL))
-               return (u32)divident / divisor;
-       do_div(divident, divisor);
-
-       return divident;
-#else
-       return divident / divisor;
-#endif
-}
-
 #if BITS_PER_LONG == 32
 # define WMULT_CONST   (~0UL)
 #else
@@ -730,15 +707,11 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
        lw->weight += inc;
-       if (sched_feat(FAIR_SLEEPERS))
-               lw->inv_weight = WMULT_CONST / lw->weight;
 }
 
 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 {
        lw->weight -= dec;
-       if (sched_feat(FAIR_SLEEPERS) && likely(lw->weight))
-               lw->inv_weight = WMULT_CONST / lw->weight;
 }
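
With the FAIR_SLEEPERS feature gone, update_load_add()/update_load_sub() no longer refresh inv_weight on every enqueue and dequeue; the cached reciprocal can instead be recomputed lazily at the point of use. A sketch of that lazy fixed-point pattern (hypothetical helper, assuming the existing WMULT_CONST/WMULT_SHIFT scheme in which a divide by lw->weight is approximated by one multiply and a shift):

static unsigned long
scale_load_delta(unsigned long delta, unsigned long weight,
		 struct load_weight *lw)
{
	u64 tmp;

	/* lazy refresh: only pay for the divide when the cache is stale */
	if (unlikely(!lw->inv_weight))
		lw->inv_weight = WMULT_CONST / lw->weight;

	/* delta * weight / lw->weight ~= (delta * weight * inv) >> SHIFT */
	tmp = (u64)delta * weight;
	return (unsigned long)((tmp * lw->inv_weight) >> WMULT_SHIFT);
}

The in-tree calc_delta_mine() additionally guards the multiply against 64-bit overflow; the sketch omits that for brevity.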
 
 /*
@@ -827,7 +800,7 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  * Update delta_exec, delta_fair fields for rq.
  *
  * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->ls.load.weight) on the runqueue, while
+ * total load (rq->load.weight) on the runqueue, while
  * delta_exec advances at the same rate as wall-clock (provided
  * cpu is not idle).
  *
@@ -835,17 +808,17 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  * runqueue over any given interval. This (smoothened) load is used
  * during load balance.
  *
- * This function is called /before/ updating rq->ls.load
+ * This function is called /before/ updating rq->load
  * and when switching tasks.
  */
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
-       update_load_add(&rq->ls.load, p->se.load.weight);
+       update_load_add(&rq->load, p->se.load.weight);
 }
 
 static inline void dec_load(struct rq *rq, const struct task_struct *p)
 {
-       update_load_sub(&rq->ls.load, p->se.load.weight);
+       update_load_sub(&rq->load, p->se.load.weight);
 }
 
 static void inc_nr_running(struct task_struct *p, struct rq *rq)
@@ -862,8 +835,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-       p->se.wait_runtime = 0;
-
        if (task_has_rt_policy(p)) {
                p->se.load.weight = prio_to_weight[0] * 2;
                p->se.load.inv_weight = prio_to_wmult[0] >> 1;
@@ -992,7 +963,7 @@ inline int task_curr(const struct task_struct *p)
 /* Used instead of source_load when we know the type == 0 */
 unsigned long weighted_cpuload(const int cpu)
 {
-       return cpu_rq(cpu)->ls.load.weight;
+       return cpu_rq(cpu)->load.weight;
 }
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
@@ -1009,15 +980,9 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
        int old_cpu = task_cpu(p);
        struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
-       u64 clock_offset, fair_clock_offset;
+       u64 clock_offset;
 
        clock_offset = old_rq->clock - new_rq->clock;
-       fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
-
-       if (p->se.wait_start_fair)
-               p->se.wait_start_fair -= fair_clock_offset;
-       if (p->se.sleep_start_fair)
-               p->se.sleep_start_fair -= fair_clock_offset;
 
 #ifdef CONFIG_SCHEDSTATS
        if (p->se.wait_start)
@@ -1027,6 +992,9 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
        if (p->se.block_start)
                p->se.block_start -= clock_offset;
 #endif
+       if (likely(new_rq->cfs.min_vruntime))
+               p->se.vruntime -= old_rq->cfs.min_vruntime -
+                                               new_rq->cfs.min_vruntime;
 
        __set_task_cpu(p, new_cpu);
 }
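
The three lines added to set_task_cpu() are the actual latency fix: a vruntime is only meaningful relative to its own queue's min_vruntime, so a migrating task must keep its relative lead or lag rather than its absolute clock value. A standalone miniature of the arithmetic, with made-up numbers:

/* A task 5ms ahead of its old queue's floor should arrive 5ms
 * ahead of the new queue's floor, not wildly early or late. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t old_min = 1000000000ULL;	/* old rq min_vruntime */
	uint64_t new_min =    2000000ULL;	/* new rq min_vruntime */
	uint64_t vruntime = old_min + 5000000;	/* task is 5ms ahead   */

	vruntime -= old_min - new_min;		/* same as the patch   */

	printf("lead on new rq: %llu ns\n",
	       (unsigned long long)(vruntime - new_min));	/* 5000000 */
	return 0;
}

Since the subtraction is done in modular 64-bit arithmetic, the adjustment comes out right whichever queue's floor happens to be larger.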
@@ -1587,16 +1555,12 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
-       p->se.wait_start_fair           = 0;
        p->se.exec_start                = 0;
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
-       p->se.wait_runtime              = 0;
-       p->se.sleep_start_fair          = 0;
 
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start                = 0;
-       p->se.sum_wait_runtime          = 0;
        p->se.sum_sleep_runtime         = 0;
        p->se.sleep_start               = 0;
        p->se.block_start               = 0;
@@ -1605,8 +1569,6 @@ static void __sched_fork(struct task_struct *p)
        p->se.exec_max                  = 0;
        p->se.slice_max                 = 0;
        p->se.wait_max                  = 0;
-       p->se.wait_runtime_overruns     = 0;
-       p->se.wait_runtime_underruns    = 0;
 #endif
 
        INIT_LIST_HEAD(&p->run_list);
@@ -1975,7 +1937,7 @@ unsigned long nr_active(void)
  */
 static void update_cpu_load(struct rq *this_rq)
 {
-       unsigned long this_load = this_rq->ls.load.weight;
+       unsigned long this_load = this_rq->load.weight;
        int i, scale;
 
        this_rq->nr_load_updates++;
@@ -6453,7 +6415,6 @@ int in_sched_functions(unsigned long addr)
 static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
        cfs_rq->tasks_timeline = RB_ROOT;
-       cfs_rq->fair_clock = 1;
 #ifdef CONFIG_FAIR_GROUP_SCHED
        cfs_rq->rq = rq;
 #endif
@@ -6579,16 +6540,12 @@ void normalize_rt_tasks(void)
        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
                p->se.fair_key                  = 0;
-               p->se.wait_runtime              = 0;
                p->se.exec_start                = 0;
-               p->se.wait_start_fair           = 0;
-               p->se.sleep_start_fair          = 0;
 #ifdef CONFIG_SCHEDSTATS
                p->se.wait_start                = 0;
                p->se.sleep_start               = 0;
                p->se.block_start               = 0;
 #endif
-               task_rq(p)->cfs.fair_clock      = 0;
                task_rq(p)->clock               = 0;
 
                if (!rt_task(p)) {