Make irq_*_affinity depend on CONFIG_GENERIC_HARDIRQS too.
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 801addda3c43d4a7a767b27468fee05232cb6f89..618a64f1915a81334ddded9b6b6175e44a169880 100644
 
 #include "internals.h"
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+cpumask_var_t irq_default_affinity;
 
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+static int init_irq_default_affinity(void)
+{
+       alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+       cpumask_setall(irq_default_affinity);
+       return 0;
+}
+core_initcall(init_irq_default_affinity);
 
 /**
  *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
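
A note on the hunk above: under CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer that must be allocated at run time, while without that option it degenerates to a one-element array and alloc_cpumask_var() is effectively a no-op, which is why the compile-time "= CPU_MASK_ALL" initializer has to give way to a core_initcall(). The userspace sketch below mimics that split with invented names (mask_var_t, alloc_mask_var, OFFSTACK); it is an analogue of the idea, not kernel code.

/* Userspace analogue of the cpumask_var_t split; all names invented. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_BITS 128				/* stand-in for NR_CPUS */
struct mask { unsigned long bits[NR_BITS / (8 * sizeof(unsigned long))]; };

#ifdef OFFSTACK					/* think CONFIG_CPUMASK_OFFSTACK=y */
typedef struct mask *mask_var_t;		/* pointer: needs run-time allocation */
static int alloc_mask_var(mask_var_t *m)
{
	return (*m = calloc(1, sizeof(**m))) != NULL;
}
#else
typedef struct mask mask_var_t[1];		/* storage lives in the variable */
static int alloc_mask_var(mask_var_t *m)
{
	(void)m;				/* nothing to allocate, like the kernel's no-op */
	return 1;
}
#endif

static void mask_setall(struct mask *m)
{
	memset(m->bits, 0xff, sizeof(m->bits));	/* analogue of cpumask_setall() */
}

static mask_var_t default_affinity;		/* analogue of irq_default_affinity */

int main(void)
{
	/* A static "= ALL" initializer cannot cover the pointer case, so
	 * initialization happens at run time, as the core_initcall() does. */
	if (!alloc_mask_var(&default_affinity))
		return 1;
	mask_setall(default_affinity);
	printf("first word: %lx\n", default_affinity->bits[0]);
	return 0;
}
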
@@ -79,7 +86,7 @@ int irq_can_set_affinity(unsigned int irq)
  *     @cpumask:       cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@ -91,14 +98,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               desc->affinity = cpumask;
+               cpumask_copy(&desc->affinity, cpumask);
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
-               desc->pending_mask = cpumask;
+               cpumask_copy(&desc->pending_mask, cpumask);
        }
 #else
-       desc->affinity = cpumask;
+       cpumask_copy(&desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
 #endif
        desc->status |= IRQ_AFFINITY_SET;
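
Caller-side view of the new prototype: affinity masks are now passed by reference, so callers hand in a const struct cpumask * (often via cpumask_of()) instead of copying an NR_CPUS-sized cpumask_t by value. A minimal sketch, assuming a hypothetical helper that pins an IRQ to CPU 2:

#include <linux/errno.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical caller: pin @irq to CPU 2 using the pointer-based API. */
static int example_pin_irq_to_cpu2(unsigned int irq)
{
	if (!irq_can_set_affinity(irq))
		return -EINVAL;

	/* cpumask_of() yields a const struct cpumask *, so nothing large
	 * is copied by value across the call. */
	return irq_set_affinity(irq, cpumask_of(2));
}
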
@@ -112,26 +119,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
  */
 int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
-       cpumask_t mask;
-
        if (!irq_can_set_affinity(irq))
                return 0;
 
-       cpus_and(mask, cpu_online_map, irq_default_affinity);
-
        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpus_intersects(desc->affinity, cpu_online_map))
-                       mask = desc->affinity;
+               if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+                   < nr_cpu_ids)
+                       goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
 
-       desc->affinity = mask;
-       desc->chip->set_affinity(irq, mask);
+       cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+set_affinity:
+       desc->chip->set_affinity(irq, &desc->affinity);
 
        return 0;
 }
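
The cpumask_any_and() < nr_cpu_ids test above is simply an intersection check done without building a temporary mask, which is what lets the on-stack cpumask_t disappear. A short sketch of the same predicate; affinity_still_online() is an invented helper name, not a kernel function:

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/irq.h>

/* Invented helper: does the descriptor's affinity still contain an
 * online CPU?  Two equivalent spellings of the same predicate. */
static bool affinity_still_online(struct irq_desc *desc)
{
	/* cpumask_any_and() returns the first bit set in the AND of the
	 * two masks, or >= nr_cpu_ids when the intersection is empty. */
	if (cpumask_any_and(&desc->affinity, cpu_online_mask) < nr_cpu_ids)
		return true;

	/* cpumask_intersects() states the same thing more directly. */
	return cpumask_intersects(&desc->affinity, cpu_online_mask);
}
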
@@ -370,16 +375,18 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                return 0;
        }
 
-       ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+       /* caller masked out all except trigger mode flags */
+       ret = chip->set_type(irq, flags);
 
        if (ret)
                pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-                               (int)(flags & IRQF_TRIGGER_MASK),
-                               irq, chip->set_type);
+                               (int)flags, irq, chip->set_type);
        else {
+               if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+                       flags |= IRQ_LEVEL;
                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-               desc->status &= ~IRQ_TYPE_SENSE_MASK;
-               desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+               desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+               desc->status |= flags;
        }
 
        return ret;
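
The "IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK" comment is what makes it legal to OR the pre-masked flags straight into desc->status; IRQ_LEVEL is added on top so the level-triggered flow handlers can tell the two sense classes apart. A compressed sketch of that bookkeeping (example_record_trigger is a made-up name, not a kernel helper):

#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>

/* Made-up helper restating the status bookkeeping above. */
static void example_record_trigger(struct irq_desc *desc, unsigned long flags)
{
	/* The driver-facing IRQF_TRIGGER_* bits and the IRQ_TYPE_* sense
	 * bits stored in desc->status share the same values. */
	BUILD_BUG_ON(IRQF_TRIGGER_MASK != IRQ_TYPE_SENSE_MASK);

	if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		flags |= IRQ_LEVEL;

	desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
	desc->status |= flags & (IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
}
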
@@ -459,7 +466,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
-                       ret = __irq_set_trigger(desc, irq, new->flags);
+                       ret = __irq_set_trigger(desc, irq,
+                                       new->flags & IRQF_TRIGGER_MASK);
 
                        if (ret) {
                                spin_unlock_irqrestore(&desc->lock, flags);
@@ -673,6 +681,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
        struct irq_desc *desc;
        int retval;
 
+       /*
+        * handle_IRQ_event() always ignores IRQF_DISABLED except for
+        * the _first_ irqaction (sigh).  That can cause oopsing, but
+        * the behavior is classified as "will not fix" so we need to
+        * start nudging drivers away from using that idiom.
+        */
+       if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+                       == (IRQF_SHARED|IRQF_DISABLED))
+               pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+                               "guaranteed on shared IRQs\n",
+                               irq, devname);
+
 #ifdef CONFIG_LOCKDEP
        /*
         * Lockdep wants atomic interrupt handlers:
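
For drivers caught by the new warning: on a shared line IRQF_DISABLED is only honoured for the first irqaction, so the handler has to cope with running with interrupts enabled and the flag is best dropped. A hypothetical registration is sketched below (exdev_isr, exdev_setup and the "exdev" name are invented; a trigger bit such as IRQF_TRIGGER_HIGH would be masked out and applied via __irq_set_trigger() as in the earlier hunk):

#include <linux/interrupt.h>
#include <linux/irqreturn.h>

/* Hypothetical handler: must tolerate running with IRQs enabled, since
 * IRQF_DISABLED cannot be relied on for shared lines. */
static irqreturn_t exdev_isr(int irq, void *dev_id)
{
	/* ... check the hardware; return IRQ_NONE if it was not ours ... */
	return IRQ_HANDLED;
}

/* Hypothetical setup: shared level-triggered line, no IRQF_DISABLED,
 * non-NULL dev_id as required for IRQF_SHARED. */
static int exdev_setup(void *dev, unsigned int irq)
{
	return request_irq(irq, exdev_isr, IRQF_SHARED | IRQF_TRIGGER_HIGH,
			   "exdev", dev);
}
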