tracehook: wait_task_inactive
author Roland McGrath <roland@redhat.com>
Sat, 26 Jul 2008 02:45:58 +0000 (19:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 26 Jul 2008 19:00:09 +0000 (12:00 -0700)
This extends wait_task_inactive() with a new argument so it can be used in
a "soft" mode where it will check for the task changing state unexpectedly
and back off.  There is no change to existing callers.  This lays the
groundwork to allow robust, noninvasive tracing that can try to sample a
blocked thread but back off safely if it wakes up.
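
For illustration only, a hypothetical sampler might use the soft mode
roughly as follows; sample_blocked_regs() and the choice of
TASK_UNINTERRUPTIBLE are made up for this sketch and are not part of the
patch:

	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE))
		return -EAGAIN;	/* @p woke up or changed state; back off */

	/* @p is off its CPU and still in the expected state */
	sample_blocked_regs(p);		/* hypothetical sampling helper */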

Signed-off-by: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/ia64/kernel/perfmon.c
include/linux/sched.h
kernel/kthread.c
kernel/ptrace.c
kernel/sched.c

diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 19d4493c619323d3f91acffeff43a08bf45aec6b..fc8f3509df270ba010ec4e2ce9765d0459d6f77e 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2626,7 +2626,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
        /*
         * make sure the task is off any CPU
         */
-       wait_task_inactive(task);
+       wait_task_inactive(task, 0);
 
        /* more to come... */
 
@@ -4774,7 +4774,7 @@ recheck:
 
                UNPROTECT_CTX(ctx, flags);
 
-               wait_task_inactive(task);
+               wait_task_inactive(task, 0);
 
                PROTECT_CTX(ctx, flags);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a95d84d0da95ddd43d93dc4e66b4fe21de639f76..f59318a0099bb5b002001bdf3615cef4340c988e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1882,9 +1882,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)  do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+                                              long match_state)
+{
+       return 1;
+}
 #endif
 
 #define next_task(p)   list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 6111c27491b1a0808929e5d4619cbc3e033fcfa8..96cff2f8710baac71e7ce91ec96202665142b5e8 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
                return;
        }
        /* Must have done schedule() in kthread() before we set_task_cpu */
-       wait_task_inactive(k);
+       wait_task_inactive(k, 0);
        set_task_cpu(k, cpu);
        k->cpus_allowed = cpumask_of_cpu(cpu);
        k->rt.nr_cpus_allowed = 1;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8392a9da64504054bf804e5809f902bc296f676f..082b3fcb32a09e2110c97b6c7527d5f3eca137b7 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
        read_unlock(&tasklist_lock);
 
        if (!ret && !kill)
-               wait_task_inactive(child);
+               ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
 
        /* All systems go.. */
        return ret;
diff --git a/kernel/sched.c b/kernel/sched.c
index fde1a10263597e4f73792b97b4fe0edaa6880f6d..0236958addcbac72ca908c6be8ac74275a512ad7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change.  If it changes, i.e. @p might have woken up,
+ * then return zero.  When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count).  If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
  * be called with interrupts off, or it may introduce deadlock with
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
        unsigned long flags;
        int running, on_rq;
+       unsigned long ncsw;
        struct rq *rq;
 
        for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
                 * return false if the runqueue has changed and p
                 * is actually now running somewhere else!
                 */
-               while (task_running(rq, p))
+               while (task_running(rq, p)) {
+                       if (match_state && unlikely(p->state != match_state))
+                               return 0;
                        cpu_relax();
+               }
 
                /*
                 * Ok, time to look more closely! We need the rq
@@ -1910,8 +1921,20 @@ void wait_task_inactive(struct task_struct *p)
                rq = task_rq_lock(p, &flags);
                running = task_running(rq, p);
                on_rq = p->se.on_rq;
+               ncsw = 0;
+               if (!match_state || p->state == match_state) {
+                       ncsw = p->nivcsw + p->nvcsw;
+                       if (unlikely(!ncsw))
+                               ncsw = 1;
+               }
                task_rq_unlock(rq, &flags);
 
+               /*
+                * If it changed from the expected state, bail out now.
+                */
+               if (unlikely(!ncsw))
+                       break;
+
                /*
                 * Was it really running after all now that we
                 * checked with the proper locks actually held?
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
                 */
                break;
        }
+
+       return ncsw;
 }
 
 /***
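
The comment added to wait_task_inactive() above also documents a
two-sample pattern: a second call that returns the same nonzero switch
count proves the task never ran in between.  A rough, hypothetical sketch
of that use (read_sample() is a placeholder, not an existing kernel
function):

	unsigned long ncsw;

	ncsw = wait_task_inactive(p, TASK_TRACED);
	if (!ncsw)
		return -ESRCH;		/* @p left TASK_TRACED */

	read_sample(p);			/* hypothetical: inspect @p while off-CPU */

	/* same count => @p was never scheduled in between */
	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
		return -EAGAIN;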