Merge branch 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
Author:     Linus Torvalds <torvalds@linux-foundation.org>
AuthorDate: Mon, 26 Jan 2009 17:47:43 +0000 (09:47 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Mon, 26 Jan 2009 17:47:43 +0000 (09:47 -0800)
* 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  hrtimers: fix inconsistent lock state on resume in hres_timers_resume
  time-sched.c: tick_nohz_update_jiffies should be static
  locking, hpet: annotate false positive warning
  kernel/fork.c: unused variable 'ret'
  itimers: remove the per-cpu-ish-ness

arch/x86/kernel/hpet.c
include/linux/init_task.h
include/linux/sched.h
include/linux/workqueue.h
kernel/fork.c
kernel/hrtimer.c
kernel/posix-cpu-timers.c
kernel/sched_stats.h
kernel/time/tick-sched.c

diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index cd759ad90690e72d109aed4309adeb87755977e5..bb2e0f0975ae67d0a903f5542b4822595bda4ded 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -628,7 +628,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
        switch (action & 0xf) {
        case CPU_ONLINE:
-               INIT_DELAYED_WORK(&work.work, hpet_work);
+               INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
                init_completion(&work.complete);
                /* FIXME: add schedule_work_on() */
                schedule_delayed_work_on(cpu, &work.work, 0);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2f3c2d4ef73b1b0df8b4e96998a9021480d5706e..ea0ea1a4c36fa0c36f2a0db8be447a2d8b6ce817 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -48,6 +48,12 @@ extern struct fs_struct init_fs;
        .posix_timers    = LIST_HEAD_INIT(sig.posix_timers),            \
        .cpu_timers     = INIT_CPU_TIMERS(sig.cpu_timers),              \
        .rlim           = INIT_RLIMITS,                                 \
+       .cputime        = { .totals = {                                 \
+               .utime = cputime_zero,                                  \
+               .stime = cputime_zero,                                  \
+               .sum_exec_runtime = 0,                                  \
+               .lock = __SPIN_LOCK_UNLOCKED(sig.cputime.totals.lock),  \
+       }, },                                                           \
 }
 
 extern struct nsproxy init_nsproxy;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b81a1f8851d51a5380d8d37fa7ba3ceb529..c20943eabb4c94b13d3ca70de00ad8550548b39d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -450,6 +450,7 @@ struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
+       spinlock_t lock;
 };
 /* Alternate field names when used to cache expirations. */
 #define prof_exp       stime
@@ -465,7 +466,7 @@ struct task_cputime {
  * used for thread group CPU clock calculations.
  */
 struct thread_group_cputime {
-       struct task_cputime *totals;
+       struct task_cputime totals;
 };
 
 /*
@@ -2180,24 +2181,30 @@ static inline int spin_needbreak(spinlock_t *lock)
  * Thread group CPU time accounting.
  */
 
-extern int thread_group_cputime_alloc(struct task_struct *);
-extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
-
-static inline void thread_group_cputime_init(struct signal_struct *sig)
+static inline
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
-       sig->cputime.totals = NULL;
+       struct task_cputime *totals = &tsk->signal->cputime.totals;
+       unsigned long flags;
+
+       spin_lock_irqsave(&totals->lock, flags);
+       *times = *totals;
+       spin_unlock_irqrestore(&totals->lock, flags);
 }
 
-static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-       if (curr->signal->cputime.totals)
-               return 0;
-       return thread_group_cputime_alloc(curr);
+       sig->cputime.totals = (struct task_cputime){
+               .utime = cputime_zero,
+               .stime = cputime_zero,
+               .sum_exec_runtime = 0,
+       };
+
+       spin_lock_init(&sig->cputime.totals.lock);
 }
 
 static inline void thread_group_cputime_free(struct signal_struct *sig)
 {
-       free_percpu(sig->cputime.totals);
 }
 
 /*
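
With the totals embedded directly in signal_struct, readers no longer walk per-CPU copies; thread_group_cputime() simply takes the lock and copies the struct. A minimal sketch of a consumer of that snapshot (group_cpu_total() is a hypothetical illustration, not part of this diff):

	/* Hypothetical consumer: total group CPU time as utime + stime. */
	static cputime_t group_cpu_total(struct task_struct *tsk)
	{
		struct task_cputime times;

		thread_group_cputime(tsk, &times);	/* copy under totals->lock */
		return cputime_add(times.utime, times.stime);
	}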
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b36291130f2237e68c678f525e2e0128ec6c6cd1..47151c8495aa87af42fd32b1c5c269af62f19dd4 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -118,6 +118,12 @@ struct execute_work {
                init_timer(&(_work)->timer);                    \
        } while (0)
 
+#define INIT_DELAYED_WORK_ON_STACK(_work, _func)               \
+       do {                                                    \
+               INIT_WORK(&(_work)->work, (_func));             \
+               init_timer_on_stack(&(_work)->timer);           \
+       } while (0)
+
 #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)                     \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
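
INIT_DELAYED_WORK_ON_STACK() differs from INIT_DELAYED_WORK() only in calling init_timer_on_stack(), which lets the timer debugobjects code track an object that lives in a stack frame, as in the hpet hotplug path above. A usage sketch, assuming a hypothetical handler and a destroy_timer_on_stack() pairing that is not part of this diff:

	#include <linux/workqueue.h>
	#include <linux/completion.h>
	#include <linux/timer.h>

	struct hotplug_work {
		struct delayed_work work;
		struct completion complete;
	};

	static void my_handler(struct work_struct *work)	/* hypothetical */
	{
		struct hotplug_work *hw =
			container_of(work, struct hotplug_work, work.work);

		complete(&hw->complete);
	}

	static void run_work_on(int cpu)
	{
		struct hotplug_work work;	/* lives in this stack frame */

		INIT_DELAYED_WORK_ON_STACK(&work.work, my_handler);
		init_completion(&work.complete);
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);	/* must not unwind early */
		destroy_timer_on_stack(&work.work.timer);
	}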
diff --git a/kernel/fork.c b/kernel/fork.c
index bf0cef8bbdf2410058e51e4bf689e49d83362fa2..242a706e77218073f74b9b8aa44ca589aaa1cbf4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
        struct signal_struct *sig;
-       int ret;
 
        if (clone_flags & CLONE_THREAD) {
-               ret = thread_group_cputime_clone_thread(current);
-               if (likely(!ret)) {
-                       atomic_inc(&current->signal->count);
-                       atomic_inc(&current->signal->live);
-               }
-               return ret;
+               atomic_inc(&current->signal->count);
+               atomic_inc(&current->signal->live);
+               return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+
+       if (sig)
+               posix_cpu_timers_init_group(sig);
+
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;
@@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);
 
-       posix_cpu_timers_init_group(sig);
-
        acct_init_pacct(&sig->pacct);
 
        tty_audit_fork(sig);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2dc30c59c5fd63560cc5485ca0075c958f5c36e5..f33afb0407bcdc5c6d6a21e06cd41dde40d46d84 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -614,7 +614,9 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-       /* Retrigger the CPU local events: */
+       WARN_ONCE(!irqs_disabled(),
+                 KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
        retrigger_next_event(NULL);
 }
 
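The warning documents the calling convention rather than changing behaviour: hres_timers_resume() must run with interrupts off. A hypothetical caller honouring that convention (the function name is illustrative only):

	static void my_resume_path(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		hres_timers_resume();	/* requires IRQs disabled */
		local_irq_restore(flags);
	}
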
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 157de3a478321ab80c8c05cee5ffd07da68bab39..fa07da94d7be9832a8a7a8816816ee16654378f5 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -9,76 +9,6 @@
 #include <asm/uaccess.h>
 #include <linux/kernel_stat.h>
 
-/*
- * Allocate the thread_group_cputime structure appropriately and fill in the
- * current values of the fields.  Called from copy_signal() via
- * thread_group_cputime_clone_thread() when adding a second or subsequent
- * thread to a thread group.  Assumes interrupts are enabled when called.
- */
-int thread_group_cputime_alloc(struct task_struct *tsk)
-{
-       struct signal_struct *sig = tsk->signal;
-       struct task_cputime *cputime;
-
-       /*
-        * If we have multiple threads and we don't already have a
-        * per-CPU task_cputime struct (checked in the caller), allocate
-        * one and fill it in with the times accumulated so far.  We may
-        * race with another thread so recheck after we pick up the sighand
-        * lock.
-        */
-       cputime = alloc_percpu(struct task_cputime);
-       if (cputime == NULL)
-               return -ENOMEM;
-       spin_lock_irq(&tsk->sighand->siglock);
-       if (sig->cputime.totals) {
-               spin_unlock_irq(&tsk->sighand->siglock);
-               free_percpu(cputime);
-               return 0;
-       }
-       sig->cputime.totals = cputime;
-       cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
-       cputime->utime = tsk->utime;
-       cputime->stime = tsk->stime;
-       cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
-       spin_unlock_irq(&tsk->sighand->siglock);
-       return 0;
-}
-
-/**
- * thread_group_cputime - Sum the thread group time fields across all CPUs.
- *
- * @tsk:       The task we use to identify the thread group.
- * @times:     task_cputime structure in which we return the summed fields.
- *
- * Walk the list of CPUs to sum the per-CPU time fields in the thread group
- * time structure.
- */
-void thread_group_cputime(
-       struct task_struct *tsk,
-       struct task_cputime *times)
-{
-       struct task_cputime *totals, *tot;
-       int i;
-
-       totals = tsk->signal->cputime.totals;
-       if (!totals) {
-               times->utime = tsk->utime;
-               times->stime = tsk->stime;
-               times->sum_exec_runtime = tsk->se.sum_exec_runtime;
-               return;
-       }
-
-       times->stime = times->utime = cputime_zero;
-       times->sum_exec_runtime = 0;
-       for_each_possible_cpu(i) {
-               tot = per_cpu_ptr(totals, i);
-               times->utime = cputime_add(times->utime, tot->utime);
-               times->stime = cputime_add(times->stime, tot->stime);
-               times->sum_exec_runtime += tot->sum_exec_runtime;
-       }
-}
-
 /*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index f2773b5d12260bbb10c3b293bd4fd1811b483848..8ab0cef8ecab44ab0751a30f33b08636a9edd30f 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
 {
+       struct task_cputime *times;
        struct signal_struct *sig;
 
        /* tsk == current, ensure it is safe to use ->signal */
@@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
                return;
 
        sig = tsk->signal;
-       if (sig->cputime.totals) {
-               struct task_cputime *times;
+       times = &sig->cputime.totals;
 
-               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-               times->utime = cputime_add(times->utime, cputime);
-               put_cpu_no_resched();
-       }
+       spin_lock(&times->lock);
+       times->utime = cputime_add(times->utime, cputime);
+       spin_unlock(&times->lock);
 }
 
 /**
@@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
 static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
 {
+       struct task_cputime *times;
        struct signal_struct *sig;
 
        /* tsk == current, ensure it is safe to use ->signal */
@@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
                return;
 
        sig = tsk->signal;
-       if (sig->cputime.totals) {
-               struct task_cputime *times;
+       times = &sig->cputime.totals;
 
-               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-               times->stime = cputime_add(times->stime, cputime);
-               put_cpu_no_resched();
-       }
+       spin_lock(&times->lock);
+       times->stime = cputime_add(times->stime, cputime);
+       spin_unlock(&times->lock);
 }
 
 /**
@@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
 {
+       struct task_cputime *times;
        struct signal_struct *sig;
 
        sig = tsk->signal;
@@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
        if (unlikely(!sig))
                return;
 
-       if (sig->cputime.totals) {
-               struct task_cputime *times;
+       times = &sig->cputime.totals;
 
-               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-               times->sum_exec_runtime += ns;
-               put_cpu_no_resched();
-       }
+       spin_lock(&times->lock);
+       times->sum_exec_runtime += ns;
+       spin_unlock(&times->lock);
 }
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1b6c05bd0d0a99e4de3a7ade02c2186ca4fc4db7..d3f1ef4d5cbe7a15fdc1d0b5cd2ee195ea4890fc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz);
  * value. We do this unconditionally on any cpu, as we don't know whether the
  * cpu, which has the update task assigned is in a long sleep.
  */
-void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(void)
 {
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);