diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 5a7392734c828ca1562ae4ef5467fa2953277c1e..cf2bc01186efee7c8f9e30df12dfa815ca6f88ce 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -7,6 +7,8 @@
  * modify it under the terms of the GNU General Public Licence
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
+ *
+ * See Documentation/slow-work.txt
  */
 
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/wait.h>
-#include <asm/system.h>
+
+#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)        /* cull threads 5s after running out of
+                                        * things to do */
+#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
+                                        * OOM */
+
+static void slow_work_cull_timeout(unsigned long);
+static void slow_work_oom_timeout(unsigned long);
+
+#ifdef CONFIG_SYSCTL
+static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
+                                       void __user *, size_t *, loff_t *);
+
+static int slow_work_max_threads_sysctl(struct ctl_table *, int, struct file *,
+                                       void __user *, size_t *, loff_t *);
+#endif
 
 /*
  * The pool of threads has at least min threads in it as long as someone is
@@ -26,9 +43,60 @@ static unsigned slow_work_min_threads = 2;
 static unsigned slow_work_max_threads = 4;
 static unsigned vslow_work_proportion = 50; /* % of threads that may process
                                             * very slow work */
+
+#ifdef CONFIG_SYSCTL
+static const int slow_work_min_min_threads = 2;
+static int slow_work_max_max_threads = 255;
+static const int slow_work_min_vslow = 1;
+static const int slow_work_max_vslow = 99;
+
+ctl_table slow_work_sysctls[] = {
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "min-threads",
+               .data           = &slow_work_min_threads,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = slow_work_min_threads_sysctl,
+               .extra1         = (void *) &slow_work_min_min_threads,
+               .extra2         = &slow_work_max_threads,
+       },
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "max-threads",
+               .data           = &slow_work_max_threads,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = slow_work_max_threads_sysctl,
+               .extra1         = &slow_work_min_threads,
+               .extra2         = (void *) &slow_work_max_max_threads,
+       },
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "vslow-percentage",
+               .data           = &vslow_work_proportion,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec_minmax,
+               .extra1         = (void *) &slow_work_min_vslow,
+               .extra2         = (void *) &slow_work_max_vslow,
+       },
+       { .ctl_name = 0 }
+};
+#endif
+
+/*
+ * The active state of the thread pool
+ */
 static atomic_t slow_work_thread_count;
 static atomic_t vslow_work_executing_count;
 
+static bool slow_work_may_not_start_new_thread;
+static bool slow_work_cull; /* cull a thread due to lack of activity */
+static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
+static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
+static struct slow_work slow_work_new_thread; /* new thread starter */
+
 /*
  * The queues of work items and the lock governing access to them.  These are
  * shared between all the CPUs.  It doesn't make sense to have per-CPU queues
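
The sysctl table above is only defined here; the companion change (not shown in this diff) splices it into the kernel's sysctl tree so the knobs appear under /proc/sys/kernel/slow-work/. As a self-contained sketch of equivalent wiring, assuming a "slow-work" directory under "kernel" and the hypothetical names slow_work_dir, slow_work_root and slow_work_sysctl_setup, a standalone registration with the old ctl_name-based API would look roughly like this:

#include <linux/sysctl.h>
#include <linux/errno.h>

/* Sketch only: this patch relies on kernel/sysctl.c to expose the table;
 * a standalone registration of the same knobs could look like this. */
static struct ctl_table slow_work_dir[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "slow-work",		/* assumed directory name */
		.mode		= 0555,
		.child		= slow_work_sysctls,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table slow_work_root[] = {
	{
		.ctl_name	= CTL_KERN,
		.procname	= "kernel",
		.mode		= 0555,
		.child		= slow_work_dir,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table_header *slow_work_sysctl_header;

static int __init slow_work_sysctl_setup(void)
{
	slow_work_sysctl_header = register_sysctl_table(slow_work_root);
	return slow_work_sysctl_header ? 0 : -ENOMEM;
}
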
@@ -89,6 +157,14 @@ static bool slow_work_execute(void)
 
        vsmax = slow_work_calc_vsmax();
 
+       /* see if we can schedule a new thread to be started if we're not
+        * keeping up with the work */
+       if (!waitqueue_active(&slow_work_thread_wq) &&
+           (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
+           atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
+           !slow_work_may_not_start_new_thread)
+               slow_work_enqueue(&slow_work_new_thread);
+
        /* find something to execute */
        spin_lock_irq(&slow_work_queue_lock);
        if (!list_empty(&vslow_work_queue) &&
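
slow_work_calc_vsmax(), called just above, is untouched by this patch and not shown in the hunk; it turns vslow_work_proportion into a cap on how many pool threads may be busy with very slow items at once. A rough sketch of that calculation, for orientation only (the exact clamping in the tree may differ):

/* Sketch of the helper this hunk calls: the very-slow cap is a percentage
 * of the current pool size, kept to at least 1 and below the pool maximum
 * so that ordinary slow work can always make progress. */
static unsigned slow_work_calc_vsmax(void)
{
	unsigned vsmax;

	vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
	vsmax /= 100;
	vsmax = max(vsmax, (unsigned) 1);
	return min(vsmax, slow_work_max_threads - 1);
}
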
@@ -242,6 +318,33 @@ cant_get_ref:
 }
 EXPORT_SYMBOL(slow_work_enqueue);
 
+/*
+ * Worker thread culling algorithm
+ */
+static bool slow_work_cull_thread(void)
+{
+       unsigned long flags;
+       bool do_cull = false;
+
+       spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+       if (slow_work_cull) {
+               slow_work_cull = false;
+
+               if (list_empty(&slow_work_queue) &&
+                   list_empty(&vslow_work_queue) &&
+                   atomic_read(&slow_work_thread_count) >
+                   slow_work_min_threads) {
+                       mod_timer(&slow_work_cull_timer,
+                                 jiffies + SLOW_WORK_CULL_TIMEOUT);
+                       do_cull = true;
+               }
+       }
+
+       spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+       return do_cull;
+}
+
 /*
  * Determine if there is slow work available for dispatch
  */
@@ -273,7 +376,8 @@ static int slow_work_thread(void *_data)
                                TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !slow_work_threads_should_exit &&
-                   !slow_work_available(vsmax))
+                   !slow_work_available(vsmax) &&
+                   !slow_work_cull)
                        schedule();
                finish_wait(&slow_work_thread_wq, &wait);
 
@@ -285,11 +389,20 @@ static int slow_work_thread(void *_data)
 
                if (slow_work_available(vsmax) && slow_work_execute()) {
                        cond_resched();
+                       if (list_empty(&slow_work_queue) &&
+                           list_empty(&vslow_work_queue) &&
+                           atomic_read(&slow_work_thread_count) >
+                           slow_work_min_threads)
+                               mod_timer(&slow_work_cull_timer,
+                                         jiffies + SLOW_WORK_CULL_TIMEOUT);
                        continue;
                }
 
                if (slow_work_threads_should_exit)
                        break;
+
+               if (slow_work_cull && slow_work_cull_thread())
+                       break;
        }
 
        if (atomic_dec_and_test(&slow_work_thread_count))
@@ -297,6 +410,135 @@ static int slow_work_thread(void *_data)
        return 0;
 }
 
+/*
+ * Handle thread cull timer expiration
+ */
+static void slow_work_cull_timeout(unsigned long data)
+{
+       slow_work_cull = true;
+       wake_up(&slow_work_thread_wq);
+}
+
+/*
+ * Get a reference on slow work thread starter
+ */
+static int slow_work_new_thread_get_ref(struct slow_work *work)
+{
+       return 0;
+}
+
+/*
+ * Drop a reference on slow work thread starter
+ */
+static void slow_work_new_thread_put_ref(struct slow_work *work)
+{
+}
+
+/*
+ * Start a new slow work thread
+ */
+static void slow_work_new_thread_execute(struct slow_work *work)
+{
+       struct task_struct *p;
+
+       if (slow_work_threads_should_exit)
+               return;
+
+       if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
+               return;
+
+       if (!mutex_trylock(&slow_work_user_lock))
+               return;
+
+       slow_work_may_not_start_new_thread = true;
+       atomic_inc(&slow_work_thread_count);
+       p = kthread_run(slow_work_thread, NULL, "kslowd");
+       if (IS_ERR(p)) {
+               printk(KERN_DEBUG "Slow work thread pool: OOM\n");
+               if (atomic_dec_and_test(&slow_work_thread_count))
+                       BUG(); /* we're running on a slow work thread... */
+               mod_timer(&slow_work_oom_timer,
+                         jiffies + SLOW_WORK_OOM_TIMEOUT);
+       } else {
+               /* ratelimit the starting of new threads */
+               mod_timer(&slow_work_oom_timer, jiffies + 1);
+       }
+
+       mutex_unlock(&slow_work_user_lock);
+}
+
+static const struct slow_work_ops slow_work_new_thread_ops = {
+       .get_ref        = slow_work_new_thread_get_ref,
+       .put_ref        = slow_work_new_thread_put_ref,
+       .execute        = slow_work_new_thread_execute,
+};
+
+/*
+ * post-OOM new thread start suppression expiration
+ */
+static void slow_work_oom_timeout(unsigned long data)
+{
+       slow_work_may_not_start_new_thread = false;
+}
+
+#ifdef CONFIG_SYSCTL
+/*
+ * Handle adjustment of the minimum number of threads
+ */
+static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
+                                       struct file *filp, void __user *buffer,
+                                       size_t *lenp, loff_t *ppos)
+{
+       int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+       int n;
+
+       if (ret == 0) {
+               mutex_lock(&slow_work_user_lock);
+               if (slow_work_user_count > 0) {
+                       /* see if we need to start or stop threads */
+                       n = atomic_read(&slow_work_thread_count) -
+                               slow_work_min_threads;
+
+                       if (n < 0 && !slow_work_may_not_start_new_thread)
+                               slow_work_enqueue(&slow_work_new_thread);
+                       else if (n > 0)
+                               mod_timer(&slow_work_cull_timer,
+                                         jiffies + SLOW_WORK_CULL_TIMEOUT);
+               }
+               mutex_unlock(&slow_work_user_lock);
+       }
+
+       return ret;
+}
+
+/*
+ * Handle adjustment of the maximum number of threads
+ */
+static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
+                                       struct file *filp, void __user *buffer,
+                                       size_t *lenp, loff_t *ppos)
+{
+       int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+       int n;
+
+       if (ret == 0) {
+               mutex_lock(&slow_work_user_lock);
+               if (slow_work_user_count > 0) {
+                       /* see if we need to stop threads */
+                       n = slow_work_max_threads -
+                               atomic_read(&slow_work_thread_count);
+
+                       if (n < 0)
+                               mod_timer(&slow_work_cull_timer,
+                                         jiffies + SLOW_WORK_CULL_TIMEOUT);
+               }
+               mutex_unlock(&slow_work_user_lock);
+       }
+
+       return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
 /**
  * slow_work_register_user - Register a user of the facility
  *
@@ -316,6 +558,10 @@ int slow_work_register_user(void)
                init_completion(&slow_work_last_thread_exited);
 
                slow_work_threads_should_exit = false;
+               slow_work_init(&slow_work_new_thread,
+                              &slow_work_new_thread_ops);
+               slow_work_may_not_start_new_thread = false;
+               slow_work_cull = false;
 
                /* start the minimum number of threads */
                for (loop = 0; loop < slow_work_min_threads; loop++) {
@@ -369,6 +615,8 @@ void slow_work_unregister_user(void)
                       " Shut down complete\n");
        }
 
+       del_timer_sync(&slow_work_cull_timer);
+
        mutex_unlock(&slow_work_user_lock);
 }
 EXPORT_SYMBOL(slow_work_unregister_user);
@@ -380,8 +628,12 @@ static int __init init_slow_work(void)
 {
        unsigned nr_cpus = num_possible_cpus();
 
-       if (nr_cpus > slow_work_max_threads)
+       if (slow_work_max_threads < nr_cpus)
                slow_work_max_threads = nr_cpus;
+#ifdef CONFIG_SYSCTL
+       if (slow_work_max_max_threads < nr_cpus * 2)
+               slow_work_max_max_threads = nr_cpus * 2;
+#endif
        return 0;
 }
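
For context, the pool this patch makes tunable is driven by clients through the small API in include/linux/slow-work.h (see Documentation/slow-work.txt, referenced at the top of the file). A usage sketch follows; the my_* names are invented, while the calls are the documented slow_work_init()/slow_work_enqueue()/slow_work_register_user()/slow_work_unregister_user() interface:

#include <linux/kernel.h>
#include <linux/slow-work.h>

/* Illustrative client of the slow-work facility (names are made up; the
 * calls match include/linux/slow-work.h). */
struct my_object {
	struct slow_work work;
	/* ... item state ... */
};

static int my_get_ref(struct slow_work *work)
{
	/* pin the containing object while it is queued or executing */
	return 0;
}

static void my_put_ref(struct slow_work *work)
{
	/* drop the reference taken in my_get_ref() */
}

static void my_execute(struct slow_work *work)
{
	struct my_object *obj = container_of(work, struct my_object, work);

	/* do the slow, sleepable processing of obj here */
	(void)obj;
}

static const struct slow_work_ops my_ops = {
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

static int my_setup(struct my_object *obj)
{
	int ret = slow_work_register_user();	/* ensure the pool exists */
	if (ret < 0)
		return ret;
	slow_work_init(&obj->work, &my_ops);	/* or vslow_work_init() */
	return slow_work_enqueue(&obj->work);
}

static void my_teardown(void)
{
	slow_work_unregister_user();		/* may stop the pool threads */
}
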