/*
 * Catch-all handler for spurious/unhandled interrupts: log the
 * descriptor state, bump the per-CPU stat counter, and ack the bad IRQ.
 * NOTE(review): this is a patch hunk — the return-type line and parts of
 * the body are elided above/below; the trailing extra '}' belongs to the
 * surrounding (invisible) context.
 */
handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
print_irq_desc(irq, desc);
+#ifdef CONFIG_HAVE_DYN_ARRAY
/* dyn-array kstat: counter lives in the descriptor */
kstat_irqs_this_cpu(desc)++;
+#else
+	/* legacy kstat: counter indexed by irq number */
+	kstat_irqs_this_cpu(irq)++;
+#endif
ack_bad_irq(irq);
}
}
#ifdef CONFIG_HAVE_SPARSE_IRQ
+/*
+ * Protect the sparse_irqs_free freelist:
+ */
+static DEFINE_SPINLOCK(sparse_irq_lock);
static struct irq_desc *sparse_irqs_free;
struct irq_desc *sparse_irqs;
#endif
}
return NULL;
}
+
/*
 * Look up the irq_desc for @irq, allocating one from the sparse_irqs
 * freelist if it does not exist yet. Returns the (possibly new) descriptor.
 *
 * NOTE(review): this is an elided patch hunk; several context lines are
 * missing, so the control flow shown here is not complete.
 */
struct irq_desc *irq_to_desc_alloc(unsigned int irq)
{
struct irq_desc *desc, *desc_pri;
- int i;
+ unsigned long flags;
int count = 0;
+ int i;
/*
 * NOTE(review): this freelist walk runs BEFORE sparse_irq_lock is
 * taken below — if another CPU can mutate sparse_irqs concurrently,
 * this traversal is unprotected. Confirm against the full patch.
 */
desc_pri = desc = sparse_irqs;
while (desc) {
count++;
}
/* Serialize freelist manipulation (see sparse_irq_lock above). */
+ spin_lock_irqsave(&sparse_irq_lock, flags);
/*
 * we run out of pre-allocate ones, allocate more
 */
else
sparse_irqs = desc;
desc->irq = irq;
-	printk(KERN_DEBUG "found new irq_desc for irq %d\n", desc->irq);
-#ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
-	{
-		/* dump the results */
-		struct irq_desc *desc;
-		unsigned long phys;
-		unsigned long bytes = sizeof(struct irq_desc);
-		unsigned int irqx;
-
-		printk(KERN_DEBUG "=========================== %d\n", irq);
-		printk(KERN_DEBUG "irq_desc dump after get that for %d\n", irq);
-		for_each_irq_desc(irqx, desc) {
-			phys = __pa(desc);
-			printk(KERN_DEBUG "irq_desc %d ==> [%#lx - %#lx]\n", irqx, phys, phys + bytes);
-		}
-		printk(KERN_DEBUG "===========================\n");
-	}
-#endif
+
/* Lock dropped before return; desc itself is stable once linked. */
+ spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
return desc;
}
#else
struct irqaction *action;
unsigned int status;
+#ifdef CONFIG_HAVE_DYN_ARRAY
kstat_irqs_this_cpu(desc)++;
+#else
+ kstat_irqs_this_cpu(irq)++;
+#endif
if (CHECK_IRQ_PER_CPU(desc->status)) {
irqreturn_t action_ret;
}
#endif
#ifdef CONFIG_HAVE_DYN_ARRAY
/*
 * kstat_irqs_cpu - per-CPU interrupt count for @irq
 * @irq: interrupt number
 * @cpu: cpu index into the descriptor's kstat_irqs array
 *
 * Returns the number of times @irq fired on @cpu, or 0 if no
 * descriptor exists for @irq (possible with sparse irqs, where
 * irq_to_desc() can return NULL).
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* Guard against a not-yet-allocated sparse descriptor. */
	return desc ? desc->kstat_irqs[cpu] : 0;
}
/*
 * The export must live inside the #ifdef: with !CONFIG_HAVE_DYN_ARRAY
 * there is no out-of-line definition in this file, and exporting an
 * undefined symbol breaks the build.
 */
EXPORT_SYMBOL(kstat_irqs_cpu);
#endif