cpumask fallout: Initialize irq_default_affinity earlier
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c815b42d0f5bf12baed5e25eb92c7f2d5e4b4472..3aba8d12f328ec91e59f5c72217ceb0c0d2fd0a1 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -15,9 +15,16 @@
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
 
 #include "internals.h"
 
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
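[Note: spin_lock_init() keys a lock's lockdep class to the call site that initialized it. Because irq_desc locks are now set up both statically and via kzalloc() + spin_lock_init() (see init_one_irq_desc() below), they would otherwise land in different classes; lockdep_set_class() with this shared key folds them into one. A minimal sketch of the pattern, using a hypothetical struct foo:

    static struct lock_class_key foo_lock_class;

    struct foo {
            spinlock_t lock;
    };

    static void foo_init(struct foo *f)
    {
            /* spin_lock_init() would key the class to this call site ... */
            spin_lock_init(&f->lock);
            /* ... so fold every foo lock into one shared class instead. */
            lockdep_set_class(&f->lock, &foo_lock_class);
    }
]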
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -32,6 +39,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
        ack_bad_irq(irq);
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+       alloc_bootmem_cpumask_var(&irq_default_affinity);
+       cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
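[Note: this is the change named in the commit subject. With cpumask_var_t, irq_default_affinity can no longer be a statically initialized mask, so it is allocated from bootmem and filled with all CPUs here, early enough that early_irq_init() below runs before any descriptor is used. Roughly how the default mask gets consumed later, a simplified sketch modeled on irq_select_affinity() of this era, not a verbatim quote:

    static void sketch_select_affinity(struct irq_desc *desc)
    {
            /* No explicit binding yet: intersect the system-wide
             * default with the CPUs that are actually online. */
            cpumask_and(&desc->affinity, irq_default_affinity,
                        cpu_online_mask);
    }
]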
+
 /*
  * Linux has a controller-independent interrupt architecture.
  * Every controller has a 'controller-template', that is used
@@ -49,6 +68,152 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_desc irq_desc_init = {
+       .irq        = -1,
+       .status     = IRQ_DISABLED,
+       .chip       = &no_irq_chip,
+       .handle_irq = handle_bad_irq,
+       .depth      = 1,
+       .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+       .affinity   = CPU_MASK_ALL
+#endif
+};
+
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+{
+       unsigned long bytes;
+       char *ptr;
+       int node;
+
+       /* Compute how many bytes we need per irq and allocate them */
+       bytes = nr * sizeof(unsigned int);
+
+       node = cpu_to_node(cpu);
+       ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
+       printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+
+       if (ptr)
+               desc->kstat_irqs = (unsigned int *)ptr;
+}
+
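[Note: with sparse irqs, the per-irq statistics move out of the static kstat table and into the descriptor itself: each desc carries nr_cpu_ids counters, allocated on the node of the CPU that first needed the descriptor. Bumping a counter is then a plain array access; a sketch, cf. kstat_incr_irqs_this_cpu() from this series:

    static inline void sketch_incr_irq_stat(struct irq_desc *desc)
    {
            /* one counter per possible CPU, indexed directly */
            desc->kstat_irqs[smp_processor_id()]++;
    }
]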
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+{
+       memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+
+       spin_lock_init(&desc->lock);
+       desc->irq = irq;
+#ifdef CONFIG_SMP
+       desc->cpu = cpu;
+#endif
+       lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+       init_kstat_irqs(desc, cpu, nr_cpu_ids);
+       if (!desc->kstat_irqs) {
+               printk(KERN_ERR "cannot alloc kstat_irqs\n");
+               BUG_ON(1);
+       }
+       arch_init_chip_data(desc, cpu);
+}
+
+/*
+ * Protect the sparse_irqs:
+ */
+DEFINE_SPINLOCK(sparse_irq_lock);
+
+struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+       [0 ... NR_IRQS_LEGACY-1] = {
+               .irq        = -1,
+               .status     = IRQ_DISABLED,
+               .chip       = &no_irq_chip,
+               .handle_irq = handle_bad_irq,
+               .depth      = 1,
+               .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+               .affinity   = CPU_MASK_ALL
+#endif
+       }
+};
+
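[Note: the [0 ... NR_IRQS_LEGACY-1] form is GCC's range-designator extension: every element of the array receives the same initializer. The .lock line reuses irq_desc_init's name only because __SPIN_LOCK_UNLOCKED() wants an identifier for lockdep's lock name, and early_irq_init() below resets the class anyway. A self-contained illustration of the extension:

    /* GCC extension: elements 0..15 all start at depth 1. */
    static int default_depth[16] = { [0 ... 15] = 1 };
]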
+/* FIXME: use bootmem alloc ...*/
+static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+
+int __init early_irq_init(void)
+{
+       struct irq_desc *desc;
+       int legacy_count;
+       int i;
+
+       init_irq_default_affinity();
+
+       desc = irq_desc_legacy;
+       legacy_count = ARRAY_SIZE(irq_desc_legacy);
+
+       for (i = 0; i < legacy_count; i++) {
+               desc[i].irq = i;
+               desc[i].kstat_irqs = kstat_irqs_legacy[i];
+               lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+
+               irq_desc_ptrs[i] = desc + i;
+       }
+
+       for (i = legacy_count; i < NR_IRQS; i++)
+               irq_desc_ptrs[i] = NULL;
+
+       return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+       return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+       struct irq_desc *desc;
+       unsigned long flags;
+       int node;
+
+       if (irq >= NR_IRQS) {
+               printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
+                               irq, NR_IRQS);
+               WARN_ON(1);
+               return NULL;
+       }
+
+       desc = irq_desc_ptrs[irq];
+       if (desc)
+               return desc;
+
+       spin_lock_irqsave(&sparse_irq_lock, flags);
+
+       /* We have to check it to avoid races with another CPU */
+       desc = irq_desc_ptrs[irq];
+       if (desc)
+               goto out_unlock;
+
+       node = cpu_to_node(cpu);
+       desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+       printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
+                irq, cpu, node);
+       if (!desc) {
+               printk(KERN_ERR "cannot alloc irq_desc\n");
+               BUG_ON(1);
+       }
+       init_one_irq_desc(irq, desc, cpu);
+
+       irq_desc_ptrs[irq] = desc;
+
+out_unlock:
+       spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+       return desc;
+}
+
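[Note: the lookup above is the classic check/lock/re-check idiom. The unlocked read is only a fast path; the allocation decision is re-made under sparse_irq_lock, so two CPUs racing on the same irq cannot both install a descriptor. The same idiom in isolation, assuming a hypothetical struct obj:

    struct obj { int data; };

    struct obj *obj_lookup_alloc(struct obj **slot, spinlock_t *lock)
    {
            struct obj *o = *slot;          /* fast path, unlocked */
            unsigned long flags;

            if (o)
                    return o;

            spin_lock_irqsave(lock, flags);
            o = *slot;                      /* re-check: we may have raced */
            if (!o) {
                    o = kzalloc(sizeof(*o), GFP_ATOMIC);
                    if (o)
                            *slot = o;
            }
            spin_unlock_irqrestore(lock, flags);
            return o;
    }
]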
+#else /* !CONFIG_SPARSE_IRQ */
+
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status = IRQ_DISABLED,
@@ -62,6 +227,34 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        }
 };
 
+int __init early_irq_init(void)
+{
+       struct irq_desc *desc;
+       int count;
+       int i;
+
+       init_irq_default_affinity();
+
+       desc = irq_desc;
+       count = ARRAY_SIZE(irq_desc);
+
+       for (i = 0; i < count; i++)
+               desc[i].irq = i;
+
+       return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+       return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+       return irq_to_desc(irq);
+}
+#endif /* !CONFIG_SPARSE_IRQ */
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -179,8 +372,11 @@ unsigned int __do_IRQ(unsigned int irq)
                /*
                 * No locking required for CPU-local interrupts:
                 */
-               if (desc->chip->ack)
+               if (desc->chip->ack) {
                        desc->chip->ack(irq);
+                       /* get new one */
+                       desc = irq_remap_to_desc(irq, desc);
+               }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
@@ -191,8 +387,10 @@ unsigned int __do_IRQ(unsigned int irq)
        }
 
        spin_lock(&desc->lock);
-       if (desc->chip->ack)
+       if (desc->chip->ack) {
                desc->chip->ack(irq);
+               desc = irq_remap_to_desc(irq, desc);
+       }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
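[Note: the re-fetch after ->ack() accounts for descriptor migration: with sparse irqs an ack can move an irq, and thus its descriptor, so the cached desc pointer may be stale afterwards. On configurations without descriptor migration the generic fallback is simply the identity function, roughly:

    static inline struct irq_desc *
    irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
    {
            return desc;    /* no migration: the descriptor never moves */
    }
]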
@@ -259,19 +457,22 @@ out:
 }
 #endif
 
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 void early_init_irq_lock_class(void)
 {
        struct irq_desc *desc;
        int i;
 
-       for_each_irq_desc(i, desc)
+       for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+       }
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       return desc ? desc->kstat_irqs[cpu] : 0;
 }
 #endif
+EXPORT_SYMBOL(kstat_irqs_cpu);
+
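[Note: kstat_irqs_cpu() is the accessor the rest of the kernel now uses instead of poking the kstat table directly. A /proc/interrupts style reader would sum it across CPUs along these lines (a sketch, cf. show_interrupts() in arch code):

    static unsigned int sketch_total_irqs(unsigned int irq)
    {
            unsigned int cpu, sum = 0;

            /* add up this irq's per-CPU counters */
            for_each_possible_cpu(cpu)
                    sum += kstat_irqs_cpu(irq, cpu);
            return sum;
    }
]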