www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'linus' into sched/devel
author Ingo Molnar <mingo@elte.hu>
Tue, 26 Aug 2008 08:25:59 +0000 (10:25 +0200)
committer Ingo Molnar <mingo@elte.hu>
Tue, 26 Aug 2008 08:25:59 +0000 (10:25 +0200)
1  2 
kernel/sched.c

diff --combined kernel/sched.c
index 6a43c8942b0563550001b8f05409725e7a786526,9a1ddb84e26d56e7d6b283daab1edbad20ae2042..29e2ec0bd831c2b24414bef26e46f0a42ecb5ffc
@@@ -808,9 -808,9 +808,9 @@@ const_debug unsigned int sysctl_sched_n
  
  /*
   * ratelimit for updating the group shares.
-  * default: 0.5ms
+  * default: 0.25ms
   */
- const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+ unsigned int sysctl_sched_shares_ratelimit = 250000;
  
  /*
   * period over which we measure -rt task cpu usage in us.
@@@ -1921,8 -1921,11 +1921,8 @@@ unsigned long wait_task_inactive(struc
                running = task_running(rq, p);
                on_rq = p->se.on_rq;
                ncsw = 0;
 -              if (!match_state || p->state == match_state) {
 -                      ncsw = p->nivcsw + p->nvcsw;
 -                      if (unlikely(!ncsw))
 -                              ncsw = 1;
 -              }
 +              if (!match_state || p->state == match_state)
 +                      ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
                task_rq_unlock(rq, &flags);
  
                /*
@@@ -4596,7 -4599,10 +4596,7 @@@ do_wait_for_common(struct completion *x
                wait.flags |= WQ_FLAG_EXCLUSIVE;
                __add_wait_queue_tail(&x->wait, &wait);
                do {
 -                      if ((state == TASK_INTERRUPTIBLE &&
 -                           signal_pending(current)) ||
 -                          (state == TASK_KILLABLE &&
 -                           fatal_signal_pending(current))) {
 +                      if (signal_pending_state(state, current)) {
                                timeout = -ERESTARTSYS;
                                break;
                        }
@@@ -4663,6 -4669,52 +4663,52 @@@ int __sched wait_for_completion_killabl
  }
  EXPORT_SYMBOL(wait_for_completion_killable);
  
+ /**
+  *    try_wait_for_completion - try to decrement a completion without blocking
+  *    @x:     completion structure
+  *
+  *    Returns: 0 if a decrement cannot be done without blocking
+  *             1 if a decrement succeeded.
+  *
+  *    If a completion is being used as a counting completion,
+  *    attempt to decrement the counter without blocking. This
+  *    enables us to avoid waiting if the resource the completion
+  *    is protecting is not available.
+  */
+ bool try_wait_for_completion(struct completion *x)
+ {
+       int ret = 1;
+       spin_lock_irq(&x->wait.lock);
+       if (!x->done)
+               ret = 0;
+       else
+               x->done--;
+       spin_unlock_irq(&x->wait.lock);
+       return ret;
+ }
+ EXPORT_SYMBOL(try_wait_for_completion);
+ /**
+  *    completion_done - Test to see if a completion has any waiters
+  *    @x:     completion structure
+  *
+  *    Returns: 0 if there are waiters (wait_for_completion() in progress)
+  *             1 if there are no waiters.
+  *
+  */
+ bool completion_done(struct completion *x)
+ {
+       int ret = 1;
+       spin_lock_irq(&x->wait.lock);
+       if (!x->done)
+               ret = 0;
+       spin_unlock_irq(&x->wait.lock);
+       return ret;
+ }
+ EXPORT_SYMBOL(completion_done);
  static long __sched
  sleep_on_common(wait_queue_head_t *q, int state, long timeout)
  {
@@@ -5734,6 -5786,8 +5780,8 @@@ static inline void sched_init_granulari
                sysctl_sched_latency = limit;
  
        sysctl_sched_wakeup_granularity *= factor;
+       sysctl_sched_shares_ratelimit *= factor;
  }
  
  #ifdef CONFIG_SMP
@@@ -8456,8 -8510,8 +8504,8 @@@ struct task_group *sched_create_group(s
        WARN_ON(!parent); /* root should already exist */
  
        tg->parent = parent;
-       list_add_rcu(&tg->siblings, &parent->children);
        INIT_LIST_HEAD(&tg->children);
+       list_add_rcu(&tg->siblings, &parent->children);
        spin_unlock_irqrestore(&task_group_lock, flags);
  
        return tg;