sched: fix memory leak in a failure path
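
The headline change is the one-line fix at the bottom of the diff: __build_sched_domains() allocates a root_domain early on, but its error label only released the sched groups and the cpumask scratch space, so the root_domain leaked whenever domain construction failed. The added kfree(rd) closes that hole. A stand-alone sketch of the pattern, using plain malloc/free and made-up names in place of the kernel allocators:

#include <stdlib.h>

struct root_domain { int refcount; };

/* Model of the __build_sched_domains() error path: the allocation made
 * at the top of the function has to be released on every failure exit,
 * not only the resources allocated after it. */
static int build_domains(void)
{
    struct root_domain *rd;
    void *groups;

    rd = calloc(1, sizeof(*rd));    /* stands in for alloc_rootdomain() */
    if (!rd)
        return -1;

    groups = malloc(128);           /* a later allocation that can fail */
    if (!groups)
        goto error;

    /* success; the kernel would attach rd to the runqueues here,
     * the model just cleans up */
    free(groups);
    free(rd);
    return 0;

error:
    free(rd);                       /* the fix: without this, rd leaks */
    return -1;
}

int main(void)
{
    return build_domains() ? 1 : 0;
}
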
diff --git a/kernel/sched.c b/kernel/sched.c
index d906f72b42d23ae1d8c2355d9b605e5fd0761eaa..57c933ffbee1aab4794c30f3690b8188e4ddf02a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -55,6 +55,7 @@
 #include <linux/cpuset.h>
 #include <linux/percpu.h>
 #include <linux/kthread.h>
+#include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/sysctl.h>
 #include <linux/syscalls.h>
@@ -227,9 +228,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-               hrtimer_start(&rt_b->rt_period_timer,
-                             rt_b->rt_period_timer.expires,
-                             HRTIMER_MODE_ABS);
+               hrtimer_start_expires(&rt_b->rt_period_timer,
+                               HRTIMER_MODE_ABS);
        }
        spin_unlock(&rt_b->rt_runtime_lock);
 }
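
The two hrtimer hunks (here and in hrtick_start() further down) follow the hrtimer API change that stopped callers from poking timer->expires directly: the expiry is set through an accessor, and the timer is then started on whatever expiry it already carries. Roughly, and assuming the 2.6.28-era hrtimer interface, the before/after pattern is (kernel context, not a stand-alone program):

/* old: write the field, then hand it straight back to hrtimer_start() */
timer->expires = time;
hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);

/* new: set the expiry via the accessor ... */
hrtimer_set_expires(timer, time);
/* ... and start the timer on the expiry it already holds */
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
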
@@ -386,7 +386,6 @@ struct cfs_rq {
 
        u64 exec_clock;
        u64 min_vruntime;
-       u64 pair_start;
 
        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;
@@ -398,7 +397,7 @@ struct cfs_rq {
         * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
-       struct sched_entity *curr, *next;
+       struct sched_entity *curr, *next, *last;
 
        unsigned long nr_spread_over;
 
@@ -818,6 +817,13 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_shares_ratelimit = 250000;
 
+/*
+ * Inject some fuzziness into changing the per-cpu group shares;
+ * this avoids remote rq-locks at the expense of fairness.
+ * default: 4
+ */
+unsigned int sysctl_sched_shares_thresh = 4;
+
 /*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
@@ -1064,7 +1070,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
 
-       timer->expires = time;
+       hrtimer_set_expires(timer, time);
 
        if (rq == this_rq()) {
                hrtimer_restart(timer);
@@ -1454,8 +1460,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  * Calculate and set the cpu's group shares.
  */
 static void
-__update_group_shares_cpu(struct task_group *tg, int cpu,
-                         unsigned long sd_shares, unsigned long sd_rq_weight)
+update_group_shares_cpu(struct task_group *tg, int cpu,
+                       unsigned long sd_shares, unsigned long sd_rq_weight)
 {
        int boost = 0;
        unsigned long shares;
@@ -1486,19 +1492,23 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
         *
         */
        shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
+       shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 
-       /*
-        * record the actual number of shares, not the boosted amount.
-        */
-       tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
-       tg->cfs_rq[cpu]->rq_weight = rq_weight;
+       if (abs(shares - tg->se[cpu]->load.weight) >
+                       sysctl_sched_shares_thresh) {
+               struct rq *rq = cpu_rq(cpu);
+               unsigned long flags;
 
-       if (shares < MIN_SHARES)
-               shares = MIN_SHARES;
-       else if (shares > MAX_SHARES)
-               shares = MAX_SHARES;
+               spin_lock_irqsave(&rq->lock, flags);
+               /*
+                * record the actual number of shares, not the boosted amount.
+                */
+               tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
+               tg->cfs_rq[cpu]->rq_weight = rq_weight;
 
-       __set_se_shares(tg->se[cpu], shares);
+               __set_se_shares(tg->se[cpu], shares);
+               spin_unlock_irqrestore(&rq->lock, flags);
+       }
 }
 
 /*
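
Taken together with the sysctl_sched_shares_thresh knob added above, the rewritten update_group_shares_cpu() gives each CPU a slice of the group's shares proportional to that CPU's runqueue weight within the domain, clamps it to [MIN_SHARES, MAX_SHARES], and only takes the (possibly remote) rq->lock to re-apply it when the result differs from the currently applied weight by more than the threshold. A stand-alone model of that arithmetic with made-up numbers (ordinary integers, no locking; MIN_SHARES/MAX_SHARES assumed to be 2 and 1<<18 as in kernels of this era):

#include <stdio.h>

#define MIN_SHARES  2UL
#define MAX_SHARES  (1UL << 18)

/* Per-cpu slice of a group's shares, proportional to this cpu's
 * runqueue weight within the sched domain. */
static unsigned long cpu_shares(unsigned long sd_shares,
                                unsigned long rq_weight,
                                unsigned long sd_rq_weight)
{
    unsigned long shares = sd_shares * rq_weight / (sd_rq_weight + 1);

    if (shares < MIN_SHARES)
        shares = MIN_SHARES;
    else if (shares > MAX_SHARES)
        shares = MAX_SHARES;
    return shares;
}

int main(void)
{
    unsigned long thresh = 4;   /* sysctl_sched_shares_thresh */
    unsigned long cur = 339;    /* weight currently applied to the se */
    unsigned long want = cpu_shares(1024, 1024, 3072);  /* = 341 */
    long diff = (long)want - (long)cur;

    if (diff < 0)
        diff = -diff;

    /* Only lock the remote runqueue and re-set the shares when the
     * change is big enough; small drift is tolerated for less locking. */
    if (diff > (long)thresh)
        printf("re-apply shares: %lu -> %lu\n", cur, want);
    else
        printf("skip: %lu -> %lu is within the threshold\n", cur, want);
    return 0;
}
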
@@ -1527,14 +1537,8 @@ static int tg_shares_up(struct task_group *tg, void *data)
        if (!rq_weight)
                rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
 
-       for_each_cpu_mask(i, sd->span) {
-               struct rq *rq = cpu_rq(i);
-               unsigned long flags;
-
-               spin_lock_irqsave(&rq->lock, flags);
-               __update_group_shares_cpu(tg, i, shares, rq_weight);
-               spin_unlock_irqrestore(&rq->lock, flags);
-       }
+       for_each_cpu_mask(i, sd->span)
+               update_group_shares_cpu(tg, i, shares, rq_weight);
 
        return 0;
 }
@@ -1801,7 +1805,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
        /*
         * Buddy candidates are cache hot:
         */
-       if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+       if (sched_feat(CACHE_HOT_BUDDY) &&
+                       (&p->se == cfs_rq_of(&p->se)->next ||
+                        &p->se == cfs_rq_of(&p->se)->last))
                return 1;
 
        if (p->sched_class != &fair_sched_class)
@@ -3339,7 +3345,7 @@ small_imbalance:
                } else
                        this_load_per_task = cpu_avg_load_per_task(this_cpu);
 
-               if (max_load - this_load + 2*busiest_load_per_task >=
+               if (max_load - this_load + busiest_load_per_task >=
                                        busiest_load_per_task * imbn) {
                        *imbalance = busiest_load_per_task;
                        return busiest;
@@ -4443,12 +4449,8 @@ need_resched_nonpreemptible:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
-       /*
-        * Do the rq-clock update outside the rq lock:
-        */
-       local_irq_disable();
+       spin_lock_irq(&rq->lock);
        update_rq_clock(rq);
-       spin_lock(&rq->lock);
        clear_tsk_need_resched(prev);
 
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -6875,15 +6877,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        struct sched_domain *tmp;
 
        /* Remove the sched domains which do not contribute to scheduling. */
-       for (tmp = sd; tmp; tmp = tmp->parent) {
+       for (tmp = sd; tmp; ) {
                struct sched_domain *parent = tmp->parent;
                if (!parent)
                        break;
+
                if (sd_parent_degenerate(tmp, parent)) {
                        tmp->parent = parent->parent;
                        if (parent->parent)
                                parent->parent->child = tmp;
-               }
+               } else
+                       tmp = tmp->parent;
        }
 
        if (sd && sd_degenerate(sd)) {
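
The cpu_attach_domain() hunk fixes the walk that splices degenerate levels out of the domain hierarchy: once a parent has been removed, the loop has to re-check the same node against its new parent rather than advance unconditionally, otherwise two degenerate parents in a row leave the second one behind. The same idea on a plain parent-linked list (made-up types, child back-pointers omitted):

#include <stdio.h>

struct domain {
    struct domain *parent;
    int degenerate;     /* stand-in for sd_parent_degenerate() */
    const char *name;
};

/* Splice out degenerate parents; only move up when the current
 * parent was kept, so consecutive degenerate levels all go. */
static void trim(struct domain *d)
{
    struct domain *tmp;

    for (tmp = d; tmp; ) {
        struct domain *parent = tmp->parent;

        if (!parent)
            break;

        if (parent->degenerate)
            tmp->parent = parent->parent;   /* drop it, re-check tmp */
        else
            tmp = tmp->parent;              /* keep it, walk up */
    }
}

int main(void)
{
    struct domain top  = { NULL,  0, "top"  };
    struct domain mid2 = { &top,  1, "mid2" };  /* degenerate */
    struct domain mid1 = { &mid2, 1, "mid1" };  /* degenerate */
    struct domain base = { &mid1, 0, "base" };
    struct domain *d;

    trim(&base);
    for (d = &base; d; d = d->parent)
        printf("%s\n", d->name);    /* prints base, then top */
    return 0;
}
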
@@ -7672,6 +7676,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
        free_sched_groups(cpu_map, tmpmask);
        SCHED_CPUMASK_FREE((void *)allmasks);
+       kfree(rd);
        return -ENOMEM;
 #endif
 }