sched: minor fast-path overhead reduction
[linux-2.6-omap-h63xx.git] / kernel / sched_fair.c
index e3f3c10f70336d700a12560ba3e8e586428a4ef7..67084936b6029037448aba1bdacefaf6b381d99c 100644 (file)
@@ -528,11 +528,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
        update_load_add(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
-       if (entity_is_task(se))
+       if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, se->load.weight);
+               list_add(&se->group_node, &cfs_rq->tasks);
+       }
        cfs_rq->nr_running++;
        se->on_rq = 1;
-       list_add(&se->group_node, &cfs_rq->tasks);
 }
 
 static void
@@ -541,11 +542,12 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
        update_load_sub(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
-       if (entity_is_task(se))
+       if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, -se->load.weight);
+               list_del_init(&se->group_node);
+       }
        cfs_rq->nr_running--;
        se->on_rq = 0;
-       list_del_init(&se->group_node);
 }
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
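The two hunks above move the cfs_rq->tasks list manipulation under the entity_is_task() check, so only task entities are ever linked onto (or removed from) the per-cfs_rq list. A minimal userspace sketch of the resulting invariant follows; the names (struct entity, struct cfs_rq_model, account_enqueue) and the singly-linked list are invented for illustration and are not the kernel's types:

#include <stdbool.h>
#include <stdio.h>

struct entity {
        bool is_task;
        struct entity *next;            /* singly linked for brevity */
};

struct cfs_rq_model {
        struct entity *tasks;           /* head of the tasks-only list */
        unsigned int nr_running;
};

static void account_enqueue(struct cfs_rq_model *rq, struct entity *se)
{
        if (se->is_task) {              /* only tasks are linked in now */
                se->next = rq->tasks;
                rq->tasks = se;
        }
        rq->nr_running++;
}

int main(void)
{
        struct cfs_rq_model rq = { 0 };
        struct entity group = { .is_task = false };
        struct entity task  = { .is_task = true };

        account_enqueue(&rq, &group);
        account_enqueue(&rq, &task);

        /* A walk over rq.tasks never has to skip group entities. */
        for (struct entity *e = rq.tasks; e; e = e->next)
                printf("task on list: %d\n", e->is_task);
        return 0;
}

The payoff of this invariant shows up in the __load_balance_iterator() hunk at the end of the diff, where the skip-non-task loop disappears.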
@@ -745,7 +747,7 @@ pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
        struct rq *rq = rq_of(cfs_rq);
        u64 pair_slice = rq->clock - cfs_rq->pair_start;
 
-       if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
+       if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
                cfs_rq->pair_start = rq->clock;
                return se;
        }
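The hunk above swaps a per-pick sched_slice() computation for a fixed minimum granularity when deciding how long the "next" buddy stays attractive. A small userspace sketch of that check, with invented names and an assumed 4ms granularity value (not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint64_t min_granularity_ns = 4000000ULL;  /* assumed 4ms */

/* Return true when the fair pick should be used instead of the buddy. */
static bool use_fair_pick(uint64_t now_ns, uint64_t pair_start_ns, bool have_buddy)
{
        uint64_t pair_slice = now_ns - pair_start_ns;

        return !have_buddy || pair_slice > min_granularity_ns;
}

int main(void)
{
        printf("%d\n", use_fair_pick(10000000, 1000000, true)); /* 1: buddy has run long enough */
        printf("%d\n", use_fair_pick(2000000, 1000000, true));  /* 0: keep honouring the buddy */
        return 0;
}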
@@ -1086,7 +1088,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 #endif
 
 static int
-wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
+wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
            struct task_struct *p, int prev_cpu, int this_cpu, int sync,
            int idx, unsigned long load, unsigned long this_load,
            unsigned int imbalance)
@@ -1101,6 +1103,11 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
        if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
                return 0;
 
+       if (!sync && sched_feat(SYNC_WAKEUPS) &&
+           curr->se.avg_overlap < sysctl_sched_migration_cost &&
+           p->se.avg_overlap < sysctl_sched_migration_cost)
+               sync = 1;
+
        /*
         * If sync wakeup then subtract the (maximum possible)
         * effect of the currently running task from the load
@@ -1125,17 +1132,14 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
         * a reasonable amount of time then attract this newly
         * woken task:
         */
-       if (sync && balanced) {
-               if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-                   p->se.avg_overlap < sysctl_sched_migration_cost)
-                       return 1;
-       }
+       if (sync && balanced)
+               return 1;
 
        schedstat_inc(p, se.nr_wakeups_affine_attempts);
        tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-       if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-                       balanced) {
+       if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
+                       tl_per_task)) {
                /*
                 * This domain has SD_WAKE_AFFINE and
                 * p is cache cold in this domain, and
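The wake_affine() hunks above drop the now-unused rq argument, promote a wakeup to "sync" when both the waker's and wakee's average overlap are below the migration cost, and let a balanced sync wakeup attract the task outright. A userspace sketch of that decision flow, with invented names and an assumed 0.5ms migration cost (not the kernel implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint64_t migration_cost_ns = 500000ULL;    /* assumed 0.5ms */

static bool affine_wakeup(bool sync, bool balanced,
                          uint64_t waker_overlap_ns, uint64_t wakee_overlap_ns)
{
        /* Promote to a sync wakeup when both tasks barely overlap. */
        if (!sync &&
            waker_overlap_ns < migration_cost_ns &&
            wakee_overlap_ns < migration_cost_ns)
                sync = true;

        /* A balanced sync wakeup is pulled to the waking CPU outright. */
        if (sync && balanced)
                return true;

        /* Otherwise fall back to the load-based test (omitted here). */
        return false;
}

int main(void)
{
        printf("%d\n", affine_wakeup(false, true, 100000, 200000));     /* 1 */
        printf("%d\n", affine_wakeup(false, false, 100000, 200000));    /* 0 */
        return 0;
}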
@@ -1154,16 +1158,17 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
        struct sched_domain *sd, *this_sd = NULL;
        int prev_cpu, this_cpu, new_cpu;
        unsigned long load, this_load;
-       struct rq *rq, *this_rq;
+       struct rq *this_rq;
        unsigned int imbalance;
        int idx;
 
        prev_cpu        = task_cpu(p);
-       rq              = task_rq(p);
        this_cpu        = smp_processor_id();
        this_rq         = cpu_rq(this_cpu);
        new_cpu         = prev_cpu;
 
+       if (prev_cpu == this_cpu)
+               goto out;
        /*
         * 'this_sd' is the first domain that both
         * this_cpu and prev_cpu are present in:
@@ -1191,13 +1196,10 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
        load = source_load(prev_cpu, idx);
        this_load = target_load(this_cpu, idx);
 
-       if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
+       if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
                                     load, this_load, imbalance))
                return this_cpu;
 
-       if (prev_cpu == this_cpu)
-               goto out;
-
        /*
         * Start passive balancing when half the imbalance_pct
         * limit is reached.
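The two select_task_rq_fair() hunks move the prev_cpu == this_cpu test ahead of the sched-domain scan and the wake_affine() load calculations, so the common case of waking a task on the CPU it last ran on bails out before any of that work. A trivially small sketch of the reordering (invented names, userspace only):

#include <stdio.h>

static int select_cpu(int prev_cpu, int this_cpu)
{
        if (prev_cpu == this_cpu)       /* early exit: nothing to decide */
                return prev_cpu;

        /*
         * The sched-domain walk, imbalance and wake-affine checks would
         * run here; omitted in this sketch.
         */
        return prev_cpu;
}

int main(void)
{
        printf("%d\n", select_cpu(2, 2));
        return 0;
}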
@@ -1268,9 +1270,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
-       if (sched_feat(WAKEUP_OVERLAP) && sync &&
-                       se->avg_overlap < sysctl_sched_migration_cost &&
-                       pse->avg_overlap < sysctl_sched_migration_cost) {
+       if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+                       (se->avg_overlap < sysctl_sched_migration_cost &&
+                        pse->avg_overlap < sysctl_sched_migration_cost))) {
                resched_task(curr);
                return;
        }
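The check_preempt_wakeup() hunk relaxes the WAKEUP_OVERLAP test: preemption now fires on a sync wakeup or on a pair of tasks with small average overlap, where it previously required both at once. A userspace sketch of the new predicate, with invented names and an assumed 0.5ms migration cost (not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint64_t migration_cost_ns = 500000ULL;    /* assumed 0.5ms */

static bool overlap_preempt(bool sync,
                            uint64_t curr_overlap_ns, uint64_t wakee_overlap_ns)
{
        return sync ||
               (curr_overlap_ns < migration_cost_ns &&
                wakee_overlap_ns < migration_cost_ns);
}

int main(void)
{
        printf("%d\n", overlap_preempt(true, 900000, 900000));  /* 1: sync wakeup */
        printf("%d\n", overlap_preempt(false, 100000, 100000)); /* 1: both overlaps small */
        printf("%d\n", overlap_preempt(false, 900000, 100000)); /* 0 */
        return 0;
}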
@@ -1335,19 +1337,9 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
        if (next == &cfs_rq->tasks)
                return NULL;
 
-       /* Skip over entities that are not tasks */
-       do {
-               se = list_entry(next, struct sched_entity, group_node);
-               next = next->next;
-       } while (next != &cfs_rq->tasks && !entity_is_task(se));
-
-       if (next == &cfs_rq->tasks && !entity_is_task(se))
-               return NULL;
-
-       cfs_rq->balance_iterator = next;
-
-       if (entity_is_task(se))
-               p = task_of(se);
+       se = list_entry(next, struct sched_entity, group_node);
+       p = task_of(se);
+       cfs_rq->balance_iterator = next->next;
 
        return p;
 }
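With cfs_rq->tasks now holding task entities only (see the account_entity_* hunks above), the load-balance iterator no longer needs the skip-non-task loop and simply advances one list node per call. A userspace sketch of that simplified walk, using invented types (struct node, struct iter) rather than the kernel's list_head machinery:

#include <stddef.h>
#include <stdio.h>

struct node {
        struct node *next;
        int tid;                /* stands in for the owning task */
};

struct iter {
        struct node *head;      /* NULL-terminated list */
        struct node *pos;       /* next node to hand out */
};

static struct node *iter_next(struct iter *it)
{
        struct node *n = it->pos;

        if (!n)
                return NULL;    /* end of list */
        it->pos = n->next;      /* remember where to resume */
        return n;
}

int main(void)
{
        struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
        struct iter it = { &a, &a };

        for (struct node *n; (n = iter_next(&it)); )
                printf("task %d\n", n->tid);
        return 0;
}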