[PATCH] add schedule_on_each_cpu()
author     Christoph Lameter <clameter@engr.sgi.com>
           Sun, 8 Jan 2006 09:00:43 +0000 (01:00 -0800)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Mon, 9 Jan 2006 04:12:40 +0000 (20:12 -0800)
Swap migration's isolate_lru_page() currently uses an IPI to notify other
processors that their per-CPU LRU caches need to be drained if the page
cannot be found on the LRU.  The IPI may interrupt a processor that is in
the middle of processing its LRU caches and cause a race condition.
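For context, the IPI-based pattern being replaced looks roughly like the
sketch below.  This is a hedged reconstruction, not the exact pre-patch
code: lru_add_drain() is the existing per-CPU pagevec drain helper in
mm/swap.c, but the lru_add_drain_per_cpu() wrapper name and the
on_each_cpu() call site are assumptions based on the description above
(on_each_cpu() is shown with its 2.6-era four-argument signature).

	/*
	 * Hedged sketch of the IPI-based drain described as racy above.
	 * lru_add_drain_per_cpu() is a hypothetical wrapper name;
	 * lru_add_drain() drains the current CPU's per-CPU LRU pagevecs.
	 */
	#include <linux/smp.h>
	#include <linux/swap.h>

	static void lru_add_drain_per_cpu(void *dummy)
	{
		lru_add_drain();	/* runs in IPI context on each CPU */
	}

	static void drain_all_lru_caches_via_ipi(void)
	{
		/*
		 * 2.6-era signature: on_each_cpu(func, info, retry, wait).
		 * The IPI can land while the target CPU is itself in the
		 * middle of manipulating its LRU caches, which is the race
		 * described above.
		 */
		on_each_cpu(lru_add_drain_per_cpu, NULL, 0, 1);
	}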

This patch introduces a new function, schedule_on_each_cpu(), that uses
keventd to run the LRU draining on each processor.  Processors disable
preemption when dealing with the LRU caches (these are per-processor), and
thus executing the LRU draining from another process is safe.
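A minimal sketch of how a caller such as the swap-migration code might
drain the LRU caches with the new helper follows.  The
lru_add_drain_all() and lru_add_drain_per_cpu() names are assumptions for
illustration; schedule_on_each_cpu() and lru_add_drain() are the pieces
actually provided by this patch and by mm/swap.c respectively.

	/*
	 * Hedged caller sketch: drain every CPU's LRU pagevecs through
	 * keventd instead of an IPI.  The wrapper names are hypothetical.
	 */
	#include <linux/workqueue.h>
	#include <linux/swap.h>

	static void lru_add_drain_per_cpu(void *dummy)
	{
		lru_add_drain();	/* runs in keventd context on this CPU */
	}

	int lru_add_drain_all(void)
	{
		/* Blocks until the work has run on every online CPU. */
		return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
	}

Because the work runs in keventd's process context, it cannot interleave
with a local section that has preemption disabled while touching the
per-CPU caches, which is what removes the race described above.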

Thanks to Lee Schermerhorn <lee.schermerhorn@hp.com> for finding this race
condition.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/linux/workqueue.h
kernel/workqueue.c

include/linux/workqueue.h
index ac39d04d027cd6affbe914b914eed138fa1dab89..86b1113002319b5b9db5f765426d3ef2a35c687d 100644
@@ -65,6 +65,7 @@ extern int FASTCALL(schedule_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
kernel/workqueue.c
index 2bd5aee1c7369af14c8f8ba71a9715ad6f03346b..62d47220696a621d10661d82121719ce37a9c156 100644
@@ -419,6 +419,25 @@ int schedule_delayed_work_on(int cpu,
        return ret;
 }
 
+int schedule_on_each_cpu(void (*func) (void *info), void *info)
+{
+       int cpu;
+       struct work_struct *work;
+
+       work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+
+       if (!work)
+               return -ENOMEM;
+       for_each_online_cpu(cpu) {
+               INIT_WORK(work + cpu, func, info);
+               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
+                               work + cpu);
+       }
+       flush_workqueue(keventd_wq);
+       kfree(work);
+       return 0;
+}
+
 void flush_scheduled_work(void)
 {
        flush_workqueue(keventd_wq);