Merge branch 'core/locking' into core/urgent
author    Ingo Molnar <mingo@elte.hu>  Mon, 11 Aug 2008 22:11:49 +0000 (00:11 +0200)
committer Ingo Molnar <mingo@elte.hu>  Mon, 11 Aug 2008 22:11:49 +0000 (00:11 +0200)
kernel/sched.c
kernel/workqueue.c
mm/mmap.c

diff --combined kernel/sched.c
index 04160d277e7aeafe5b34e58da1bcb5a786b1696d,9b2b6a85577d328c417692ba92f5c6ea8ca423d9..ace566bdfc680fff14c8ad5d9474aed45b15b7f5
@@@ -600,7 -600,6 +600,6 @@@ struct rq 
        /* BKL stats */
        unsigned int bkl_count;
  #endif
-       struct lock_class_key rq_lock_key;
  };
  
  static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@@ -2759,10 -2758,10 +2758,10 @@@ static void double_rq_lock(struct rq *r
        } else {
                if (rq1 < rq2) {
                        spin_lock(&rq1->lock);
-                       spin_lock(&rq2->lock);
+                       spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                } else {
                        spin_lock(&rq2->lock);
-                       spin_lock(&rq1->lock);
+                       spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }
        update_rq_clock(rq1);
@@@ -2805,14 -2804,21 +2804,21 @@@ static int double_lock_balance(struct r
                if (busiest < this_rq) {
                        spin_unlock(&this_rq->lock);
                        spin_lock(&busiest->lock);
-                       spin_lock(&this_rq->lock);
+                       spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
                        ret = 1;
                } else
-                       spin_lock(&busiest->lock);
+                       spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
        }
        return ret;
  }
  
+ static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+       __releases(busiest->lock)
+ {
+       spin_unlock(&busiest->lock);
+       lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+ }
  /*
   * If dest_cpu is allowed for this process, migrate the task to it.
   * This is accomplished by forcing the cpu_allowed mask to only
@@@ -3637,7 -3643,7 +3643,7 @@@ redo
                ld_moved = move_tasks(this_rq, this_cpu, busiest,
                                        imbalance, sd, CPU_NEWLY_IDLE,
                                        &all_pinned);
-               spin_unlock(&busiest->lock);
+               double_unlock_balance(this_rq, busiest);
  
                if (unlikely(all_pinned)) {
                        cpu_clear(cpu_of(busiest), *cpus);
@@@ -3752,7 -3758,7 +3758,7 @@@ static void active_load_balance(struct 
                else
                        schedstat_inc(sd, alb_failed);
        }
-       spin_unlock(&target_rq->lock);
+       double_unlock_balance(busiest_rq, target_rq);
  }
  
  #ifdef CONFIG_NO_HZ
@@@ -5004,21 -5010,19 +5010,21 @@@ recheck
                        return -EPERM;
        }
  
 +      if (user) {
  #ifdef CONFIG_RT_GROUP_SCHED
 -      /*
 -       * Do not allow realtime tasks into groups that have no runtime
 -       * assigned.
 -       */
 -      if (user
 -          && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
 -              return -EPERM;
 +              /*
 +               * Do not allow realtime tasks into groups that have no runtime
 +               * assigned.
 +               */
 +              if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
 +                      return -EPERM;
  #endif
  
 -      retval = security_task_setscheduler(p, policy, param);
 -      if (retval)
 -              return retval;
 +              retval = security_task_setscheduler(p, policy, param);
 +              if (retval)
 +                      return retval;
 +      }
 +
        /*
         * make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
@@@ -7673,34 -7677,34 +7679,34 @@@ static ssize_t sched_power_savings_stor
  }
  
  #ifdef CONFIG_SCHED_MC
 -static ssize_t sched_mc_power_savings_show(struct sys_device *dev,
 -                              struct sysdev_attribute *attr, char *page)
 +static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
 +                                         char *page)
  {
        return sprintf(page, "%u\n", sched_mc_power_savings);
  }
 -static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
 -                                          struct sysdev_attribute *attr,
 +static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
                                            const char *buf, size_t count)
  {
        return sched_power_savings_store(buf, count, 0);
  }
 -static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
 -                 sched_mc_power_savings_store);
 +static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
 +                       sched_mc_power_savings_show,
 +                       sched_mc_power_savings_store);
  #endif
  
  #ifdef CONFIG_SCHED_SMT
 -static ssize_t sched_smt_power_savings_show(struct sys_device *dev,
 -                              struct sysdev_attribute *attr, char *page)
 +static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
 +                                          char *page)
  {
        return sprintf(page, "%u\n", sched_smt_power_savings);
  }
 -static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
 -                                           struct sysdev_attribute *attr,
 +static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
                                             const char *buf, size_t count)
  {
        return sched_power_savings_store(buf, count, 1);
  }
 -static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
 +static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
 +                 sched_smt_power_savings_show,
                   sched_smt_power_savings_store);
  #endif
  
@@@ -8000,7 -8004,6 +8006,6 @@@ void __init sched_init(void
  
                rq = cpu_rq(i);
                spin_lock_init(&rq->lock);
-               lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                rq->nr_running = 0;
                init_cfs_rq(&rq->cfs, rq);
                init_rt_rq(&rq->rt, rq);
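
On the kernel/sched.c side, this merge brings in the re-annotation of the runqueue locks: the per-rq lock_class_key is gone, all runqueues share one lock class, and every path that takes two rq locks does so in address order, using spin_lock_nested(..., SINGLE_DEPTH_NESTING) on the inner lock and pairing it with double_unlock_balance(), which resets the subclass via lock_set_subclass(). A minimal sketch of that ordered double-lock pattern follows; "struct foo" and the function names are invented for illustration and are not part of this patch.

/* Sketch only: foo, foo_double_lock(), foo_double_unlock() are made up. */
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct foo {
        spinlock_t lock;
        /* ... payload ... */
};

/* Callers must never pass a == b. */
static void foo_double_lock(struct foo *a, struct foo *b)
{
        if (a < b) {
                spin_lock(&a->lock);
                /* same lock class: tell lockdep this one-level nesting is intended */
                spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(&b->lock);
                spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
        }
}

static void foo_double_unlock(struct foo *a, struct foo *b)
{
        spin_unlock(&a->lock);
        spin_unlock(&b->lock);
}

Because both locks belong to the same class, lockdep would flag the second plain spin_lock() as possible recursive locking; SINGLE_DEPTH_NESTING declares that exactly one level of same-class nesting is intended, while the consistent a < b ordering is what actually prevents ABBA deadlocks.
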
diff --combined kernel/workqueue.c
index 4a26a1382df05febed4a94bce11be2bc2a46b648,8bb5b68fb3a910cb120af1b12c7189f892850765..4048e92aa04f21e7e105563fac7bf24e1be5411f
@@@ -290,11 -290,11 +290,11 @@@ static void run_workqueue(struct cpu_wo
  
                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
-               lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-               lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+               lock_map_acquire(&cwq->wq->lockdep_map);
+               lock_map_acquire(&lockdep_map);
                f(work);
-               lock_release(&lockdep_map, 1, _THIS_IP_);
-               lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+               lock_map_release(&lockdep_map);
+               lock_map_release(&cwq->wq->lockdep_map);
  
                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@@ -413,8 -413,8 +413,8 @@@ void flush_workqueue(struct workqueue_s
        int cpu;
  
        might_sleep();
-       lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&wq->lockdep_map);
+       lock_map_release(&wq->lockdep_map);
        for_each_cpu_mask_nr(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
  }
@@@ -441,8 -441,8 +441,8 @@@ int flush_work(struct work_struct *work
        if (!cwq)
                return 0;
  
-       lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&cwq->wq->lockdep_map);
+       lock_map_release(&cwq->wq->lockdep_map);
  
        prev = NULL;
        spin_lock_irq(&cwq->lock);
@@@ -536,8 -536,8 +536,8 @@@ static void wait_on_work(struct work_st
  
        might_sleep();
  
-       lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&work->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
  
        cwq = get_wq_data(work);
        if (!cwq)
@@@ -830,21 -830,10 +830,21 @@@ struct workqueue_struct *__create_workq
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
 +              /*
 +               * We must place this wq on list even if the code below fails.
 +               * cpu_down(cpu) can remove cpu from cpu_populated_map before
 +               * destroy_workqueue() takes the lock, in that case we leak
 +               * cwq[cpu]->thread.
 +               */
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
 -
 +              /*
 +               * We must initialize cwqs for each possible cpu even if we
 +               * are going to call destroy_workqueue() finally. Otherwise
 +               * cpu_up() can hit the uninitialized cwq once we drop the
 +               * lock.
 +               */
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
@@@ -872,8 -861,8 +872,8 @@@ static void cleanup_workqueue_thread(st
        if (cwq->thread == NULL)
                return;
  
-       lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&cwq->wq->lockdep_map);
+       lock_map_release(&cwq->wq->lockdep_map);
  
        flush_cpu_workqueue(cwq);
        /*
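
The kernel/workqueue.c hunks are purely mechanical: the open-coded lock_acquire()/lock_release() pairs on the workqueue and work lockdep maps become the lock_map_acquire()/lock_map_release() wrappers. As a rough illustration of how such a pseudo-lock map is used, here is a sketch; struct my_wq, my_run_one() and my_flush() are invented names, the map is assumed to have been set up with lockdep_init_map(), and the snippet assumes a CONFIG_DEBUG_LOCK_ALLOC build (the real code above wraps this in #ifdefs).

/* Sketch only: my_wq, my_run_one(), my_flush() are made up. */
#include <linux/kernel.h>
#include <linux/lockdep.h>

struct my_wq {
        struct lockdep_map lockdep_map; /* registered with lockdep_init_map() */
        /* ... queue state ... */
};

static void my_run_one(struct my_wq *wq, void (*fn)(void *data), void *data)
{
        /*
         * While a callback runs we pretend to hold the map, so every lock
         * the callback takes is recorded as nesting inside wq->lockdep_map.
         */
        lock_map_acquire(&wq->lockdep_map);
        fn(data);
        lock_map_release(&wq->lockdep_map);
}

static void my_flush(struct my_wq *wq)
{
        might_sleep();
        /*
         * An empty acquire/release marks the flusher as "taking" the map
         * as well, so flushing while holding a lock that some callback
         * needs is reported by lockdep instead of deadlocking at runtime.
         */
        lock_map_acquire(&wq->lockdep_map);
        lock_map_release(&wq->lockdep_map);
        /* ... wait for all queued callbacks to finish ... */
}

Running every callback "inside" the map and doing the empty acquire/release in the flush path lets lockdep connect locks held by flushers with locks taken by callbacks, so flushing a workqueue while holding a lock that one of its work items also needs produces a lockdep report rather than an occasional hang.
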
diff --combined mm/mmap.c
index 971d0eda754a539d73d669fa8d91bd2459f10715,32a287b631d474b4436bf07f14517e1bcc707c0f..339cf5c4d5d8c3a82b04cf57fa6b973f5ad75408
+++ b/mm/mmap.c
@@@ -370,7 -370,7 +370,7 @@@ find_vma_prepare(struct mm_struct *mm, 
                if (vma_tmp->vm_end > addr) {
                        vma = vma_tmp;
                        if (vma_tmp->vm_start <= addr)
 -                              return vma;
 +                              break;
                        __rb_link = &__rb_parent->rb_left;
                } else {
                        rb_prev = __rb_parent;
@@@ -2273,14 -2273,14 +2273,14 @@@ int install_special_mapping(struct mm_s
  
  static DEFINE_MUTEX(mm_all_locks_mutex);
  
- static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
  {
        if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
                /*
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_lock(&anon_vma->lock);
+               spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
                 * anon_vma->lock. If some other vma in this mm shares
        }
  }
  
- static void vm_lock_mapping(struct address_space *mapping)
+ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  {
        if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
                /*
                 */
                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                        BUG();
-               spin_lock(&mapping->i_mmap_lock);
+               spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
        }
  }
  
@@@ -2358,11 -2358,17 +2358,17 @@@ int mm_take_all_locks(struct mm_struct 
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (signal_pending(current))
                        goto out_unlock;
-               if (vma->anon_vma)
-                       vm_lock_anon_vma(vma->anon_vma);
                if (vma->vm_file && vma->vm_file->f_mapping)
-                       vm_lock_mapping(vma->vm_file->f_mapping);
+                       vm_lock_mapping(mm, vma->vm_file->f_mapping);
+       }
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (signal_pending(current))
+                       goto out_unlock;
+               if (vma->anon_vma)
+                       vm_lock_anon_vma(mm, vma->anon_vma);
        }
        ret = 0;
  
  out_unlock:
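
The mm/mmap.c part passes the mm down so that each i_mmap_lock and anon_vma lock is taken with spin_lock_nest_lock() against mm->mmap_sem, and it splits the walk into two passes (all file mappings first, then all anon_vmas). A hedged sketch of the nest-lock idea follows, using invented parent/child structures rather than the actual mm code.

/* Sketch only: parent, child, parent_lock_all(), parent_unlock_all() are made up. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>

struct child {
        spinlock_t lock;                /* all children share one lock class */
        struct list_head node;
};

struct parent {
        struct rw_semaphore big_sem;    /* outer lock, like mmap_sem here */
        struct list_head children;
};

static void parent_lock_all(struct parent *p)
{
        struct child *c;

        down_write(&p->big_sem);
        /*
         * Taking many locks of one class is only safe because big_sem
         * serializes everyone who walks the whole list like this;
         * spin_lock_nest_lock() tells lockdep exactly that.
         */
        list_for_each_entry(c, &p->children, node)
                spin_lock_nest_lock(&c->lock, &p->big_sem);
}

static void parent_unlock_all(struct parent *p)
{
        struct child *c;

        list_for_each_entry(c, &p->children, node)
                spin_unlock(&c->lock);
        up_write(&p->big_sem);
}

spin_lock_nest_lock() declares that any number of same-class child locks may be held at once because the caller holds the outer lock that serializes such walkers; without the annotation, grabbing many locks of one class would both look like recursive locking and overflow lockdep's held-lock table.
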