diff --git a/kernel/softirq.c b/kernel/softirq.c
index c506f266a6b90ee9d887b9eeecdd45061e74ef1f..7110daeb9a90b2b585d97cf0fda94b7eb0b1869f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
  *     Distribute under GPLv2.
  *
  *     Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ *     Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/module.h>
@@ -46,7 +48,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 EXPORT_SYMBOL(irq_stat);
 #endif
 
-static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
@@ -205,7 +207,18 @@ restart:
 
        do {
                if (pending & 1) {
+                       int prev_count = preempt_count();
+
                        h->action(h);
+
+                       if (unlikely(prev_count != preempt_count())) {
+                       printk(KERN_ERR "huh, entered softirq %td %p "
+                                      "with preempt_count %08x,"
+                                      " exited with %08x?\n", h - softirq_vec,
+                                      h->action, prev_count, preempt_count());
+                               preempt_count() = prev_count;
+                       }
+
                        rcu_bh_qsctr_inc(cpu);
                }
                h++;
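
The prev_count check added above is a debugging aid: if a softirq action returns with a different preempt count than it entered with, the imbalance is reported and the count is forcibly restored so the CPU can keep running. A minimal, hypothetical example of the kind of handler it catches (the names below are invented for illustration and are not part of this patch; the imbalance is only visible on preemptible kernels, where spin_lock() bumps the preempt count):

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	/* Broken on purpose: returns with the lock still held, so
	 * preempt_count() no longer matches prev_count, the printk in
	 * __do_softirq() fires, and the count is reset to prev_count. */
	static void buggy_example_action(struct softirq_action *h)
	{
		spin_lock(&example_lock);
		/* ... handler work ... */
		/* missing spin_unlock(&example_lock); */
	}
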
@@ -254,16 +267,12 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-#ifdef CONFIG_NO_HZ
        int cpu = smp_processor_id();
+
        if (idle_cpu(cpu) && !in_interrupt())
-               tick_nohz_stop_idle(cpu);
-#endif
+               tick_check_idle(cpu);
+
        __irq_enter();
-#ifdef CONFIG_NO_HZ
-       if (idle_cpu(cpu))
-               tick_nohz_update_jiffies();
-#endif
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
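
The irq_enter() change folds the two open-coded CONFIG_NO_HZ calls into a single tick_check_idle() helper provided by the timer code. A minimal sketch of what that helper is assumed to cover, based only on the calls removed here (this is not the actual kernel/time/tick-sched.c implementation, which may do more):

	/* Sketch under the above assumption, not real kernel code. */
	void tick_check_idle(int cpu)
	{
	#ifdef CONFIG_NO_HZ
		tick_nohz_stop_idle(cpu);	/* close out idle-time accounting */
		tick_nohz_update_jiffies();	/* catch jiffies up after a long idle */
	#endif
	}
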
@@ -463,17 +472,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+       struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+       list_add_tail(&cp->list, head);
+
+       /* Trigger the softirq only if the list was previously empty.  */
+       if (head->next == &cp->list)
+               raise_softirq_irqoff(softirq);
+}
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+       struct call_single_data *cp = data;
+       unsigned long flags;
+       int softirq;
+
+       softirq = cp->priv;
+
+       local_irq_save(flags);
+       __local_trigger(cp, softirq);
+       local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+       if (cpu_online(cpu)) {
+               cp->func = remote_softirq_receive;
+               cp->info = cp;
+               cp->flags = 0;
+               cp->priv = softirq;
+
+               __smp_call_function_single(cpu, cp);
+               return 0;
+       }
+       return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+       return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu.  If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+       if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+               __local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+       unsigned long flags;
+       int this_cpu;
+
+       local_irq_save(flags);
+       this_cpu = smp_processor_id();
+       __send_remote_softirq(cp, cpu, this_cpu, softirq);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+                                              unsigned long action, void *hcpu)
+{
+       /*
+        * If a CPU goes away, splice its entries to the current CPU
+        * and trigger a run of the softirq
+        */
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+               int cpu = (unsigned long) hcpu;
+               int i;
+
+               local_irq_disable();
+               for (i = 0; i < NR_SOFTIRQS; i++) {
+                       struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+                       struct list_head *local_head;
+
+                       if (list_empty(head))
+                               continue;
+
+                       local_head = &__get_cpu_var(softirq_work_list[i]);
+                       list_splice_init(head, local_head);
+                       raise_softirq_irqoff(i);
+               }
+               local_irq_enable();
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+       .notifier_call  = remote_softirq_cpu_notify,
+};
+
 void __init softirq_init(void)
 {
        int cpu;
 
        for_each_possible_cpu(cpu) {
+               int i;
+
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
+               for (i = 0; i < NR_SOFTIRQS; i++)
+                       INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }
 
+       register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
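
For users of the new remote-softirq infrastructure, a rough usage sketch follows. The work structure, the MY_SOFTIRQ number and the handler below are hypothetical and not defined by this patch; the pattern is simply to embed a struct call_single_data in the work item, call send_remote_softirq() to queue it toward the target CPU, and have that softirq's action drain its per-cpu softirq_work_list entry.

	#include <linux/interrupt.h>
	#include <linux/list.h>
	#include <linux/smp.h>

	struct my_work {
		struct call_single_data csd;	/* linked onto softirq_work_list */
		int payload;
	};

	/* Queue @work so that MY_SOFTIRQ's action runs on @cpu (it falls
	 * back to the local CPU if @cpu is offline or is the current CPU). */
	static void queue_work_on_cpu(struct my_work *work, int cpu)
	{
		send_remote_softirq(&work->csd, cpu, MY_SOFTIRQ);
	}

	/* The softirq action on the target CPU drains its per-cpu list. */
	static void my_softirq_action(struct softirq_action *h)
	{
		struct list_head local, *head;

		local_irq_disable();
		head = &__get_cpu_var(softirq_work_list[MY_SOFTIRQ]);
		list_replace_init(head, &local);
		local_irq_enable();

		while (!list_empty(&local)) {
			struct my_work *work = list_entry(local.next,
							  struct my_work, csd.list);

			list_del(&work->csd.list);
			/* ... process work->payload ... */
		}
	}
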