cpumask fallout: Initialize irq_default_affinity earlier
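
This diff converts irq_default_affinity from a statically initialized cpumask_t to a cpumask_var_t, so the mask is no longer valid at link time: it has to be allocated and set to "all CPUs" before anything dereferences it, which is why the initialization must move earlier in boot. A minimal sketch of that early-init pattern, assuming the usual cpumask_var_t helpers (the function name and GFP flag are illustrative, not the literal patch):

#include <linux/cpumask.h>

cpumask_var_t irq_default_affinity;

static void __init init_irq_default_affinity(void)
{
	/* real storage is only allocated with CONFIG_CPUMASK_OFFSTACK;
	 * otherwise cpumask_var_t is an ordinary array and this is cheap */
	if (!alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL))
		panic("cannot allocate irq_default_affinity");
	cpumask_setall(irq_default_affinity);	/* default: allow all CPUs */
}

The point of the cpumask_var_t conversion is that kernels built for very large NR_CPUS stop embedding full-width masks in static data and on the stack.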
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46953a06f4a82057d9420863bc3fe3a1a94c43bb..291f03664552387658690f947b1d3a4d9562dcc6 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -15,9 +15,8 @@
 
 #include "internals.h"
 
-#ifdef CONFIG_SMP
-
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+cpumask_var_t irq_default_affinity;
 
 /**
  *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -79,7 +78,7 @@ int irq_can_set_affinity(unsigned int irq)
  *     @cpumask:       cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@ -91,14 +90,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               desc->affinity = cpumask;
+               cpumask_copy(&desc->affinity, cpumask);
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
-               desc->pending_mask = cpumask;
+               cpumask_copy(&desc->pending_mask, cpumask);
        }
 #else
-       desc->affinity = cpumask;
+       cpumask_copy(&desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
 #endif
        desc->status |= IRQ_AFFINITY_SET;
@@ -112,26 +111,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
  */
 int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
-       cpumask_t mask;
-
        if (!irq_can_set_affinity(irq))
                return 0;
 
-       cpus_and(mask, cpu_online_map, irq_default_affinity);
-
        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpus_intersects(desc->affinity, cpu_online_map))
-                       mask = desc->affinity;
+               if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+                   < nr_cpu_ids)
+                       goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
 
-       desc->affinity = mask;
-       desc->chip->set_affinity(irq, mask);
+       cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+set_affinity:
+       desc->chip->set_affinity(irq, &desc->affinity);
 
        return 0;
 }
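
The rewrite above also drops the on-stack cpumask_t temporary from do_irq_select_affinity(). The trick is that cpumask_any_and() returns some CPU number from the intersection of its two arguments, or nr_cpu_ids when the intersection is empty, so comparing against nr_cpu_ids tests for overlap without materializing a mask. As a standalone sketch (the helper name is invented for illustration):

#include <linux/cpumask.h>

/* true iff *a and *b share at least one CPU; the same test as the
 * "< nr_cpu_ids" check in the hunk above, with no temporary mask */
static inline bool masks_intersect(const struct cpumask *a,
				   const struct cpumask *b)
{
	return cpumask_any_and(a, b) < nr_cpu_ids;
}

cpumask_intersects() performs the same check directly; cpumask_any_and() is shown here because it is what the patch uses.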
@@ -676,6 +673,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
        struct irq_desc *desc;
        int retval;
 
+       /*
+        * handle_IRQ_event() always ignores IRQF_DISABLED except for
+        * the _first_ irqaction (sigh).  That can cause oopsing, but
+        * the behavior is classified as "will not fix" so we need to
+        * start nudging drivers away from using that idiom.
+        */
+       if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+                       == (IRQF_SHARED|IRQF_DISABLED))
+               pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+                               "guaranteed on shared IRQs\n",
+                               irq, devname);
+
 #ifdef CONFIG_LOCKDEP
        /*
         * Lockdep wants atomic interrupt handlers:
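
The warning added in the last hunk fires when a driver requests a line with both IRQF_SHARED and IRQF_DISABLED, since only the first handler on a shared line is guaranteed to run with interrupts disabled. A hypothetical driver fragment (all foo_* names are invented) showing the pattern drivers are being nudged toward: make the handler self-sufficient with an irq-safe lock instead of relying on IRQF_DISABLED:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct foo_device {
	spinlock_t	lock;
	int		irq;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;
	unsigned long flags;

	/* correct even if we are entered with interrupts enabled,
	 * which can happen to any non-first handler on a shared line */
	spin_lock_irqsave(&foo->lock, flags);
	/* ... acknowledge and service the hardware ... */
	spin_unlock_irqrestore(&foo->lock, flags);

	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_device *foo)
{
	/* shared, but without IRQF_DISABLED: no warning, no surprises */
	return request_irq(foo->irq, foo_interrupt, IRQF_SHARED,
			   "foo", foo);
}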