goto skip;
seq_printf(p, "%3d: ", i);
for_each_online_cpu(j)
-- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
seq_printf(p, " %8s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
#endif
generic_handle_irq(irq);
- #ifndef CONFIG_IPIPE /* Useless and bugous over the I-pipe: IRQs are threaded. */
- /* If we're the only interrupt running (ignoring IRQ15 which is for
- syscalls), lower our priority to IRQ14 so that softirqs run at
- that level. If there's another, lower-level interrupt, irq_exit
- will defer softirqs to that. */
+ #ifndef CONFIG_IPIPE
+ /*
+ * If we're the only interrupt running (ignoring IRQ15 which
+ * is for syscalls), lower our priority to IRQ14 so that
+ * softirqs run at that level. If there's another,
+ * lower-level interrupt, irq_exit will defer softirqs to
+ * that. If the interrupt pipeline is enabled, we are already
+ * running at IRQ14 priority, so we don't need this code.
+ */
CSYNC();
pending = bfin_read_IPEND() & ~0x8000;
other_ints = pending & (pending - 1);
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j)
-- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
seq_printf(p, " %14s", irq_desc[i].chip->name);
- seq_printf(p, "-%-8s", irq_desc[i].name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
}
#ifdef CONFIG_SMP
-- int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
++ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
{
int cpu_dest;
if (CHECK_IRQ_PER_CPU(irq)) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
-- irq_desc[irq].affinity = CPU_MASK_ALL;
++ cpumask_setall(&irq_desc[irq].affinity);
return -EINVAL;
}
/* whatever mask they set, we just allow one CPU */
cpu_dest = first_cpu(*dest);
-- *dest = cpumask_of_cpu(cpu_dest);
-- return 0;
++ return cpu_dest;
}
static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
{
-- if (cpu_check_affinity(irq, dest))
++ int cpu_dest;
++
++ cpu_dest = cpu_check_affinity(irq, dest);
++ if (cpu_dest < 0)
return;
-- irq_desc[irq].affinity = *dest;
++ cpumask_copy(&irq_desc[irq].affinity, &cpumask_of_cpu(cpu_dest));
}
#endif
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for_each_online_cpu(j)
-- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
-- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
++ cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
-- dest = irq_desc[irq].affinity;
++ cpumask_copy(&dest, &irq_desc[irq].affinity);
if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j)
-- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
seq_printf(p, " %9s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
sun4u_irq_enable(virt_irq);
}
++ /* Don't do anything. The desc->status check for IRQ_DISABLED in
++ * handler_irq() will skip the handler call and that will leave the
++ * interrupt in the sent state. The next ->enable() call will hit the
++ * ICLR register to reset the state machine.
++ *
++ * This scheme is necessary, instead of clearing the Valid bit in the
++ * IMAP register, to handle the case of IMAP registers being shared by
++ * multiple INOs (and thus ICLR registers). Since we use a different
++ * virtual IRQ for each shared IMAP instance, the generic code thinks
++ * there is only one user so it prematurely calls ->disable() on
++ * free_irq().
++ *
++ * We have to provide an explicit ->disable() method instead of using
++ * NULL to get the default. The reason is that if the generic code
++ * sees that, it also hooks up a default ->shutdown method which
++ * invokes ->mask() which we do not want. See irq_chip_set_defaults().
++ */
static void sun4u_irq_disable(unsigned int virt_irq)
{
-- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
--
-- if (likely(data)) {
-- unsigned long imap = data->imap;
-- unsigned long tmp = upa_readq(imap);
--
-- tmp &= ~IMAP_VALID;
-- upa_writeq(tmp, imap);
-- }
}
static void sun4u_irq_eoi(unsigned int virt_irq)
desc = irq_desc + virt_irq;
-- desc->handle_irq(virt_irq, desc);
++ if (!(desc->status & IRQ_DISABLED))
++ desc->handle_irq(virt_irq, desc);
bucket_pa = next_pa;
}
u8 irte_mask;
};
--#ifdef CONFIG_SPARSE_IRQ
++#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
struct irq_2_iommu *iommu;
return index;
}
- static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
struct qi_desc desc;
| QI_IEC_SELECTIVE;
desc.high = 0;
- qi_submit_sync(&desc, iommu);
+ return qi_submit_sync(&desc, iommu);
}
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
int modify_irte(int irq, struct irte *irte_modified)
{
+ int rc;
int index;
struct irte *irte;
struct intel_iommu *iommu;
set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
__iommu_flush_cache(iommu, irte, sizeof(*irte));
- qi_flush_iec(iommu, index, 0);
-
+ rc = qi_flush_iec(iommu, index, 0);
spin_unlock(&irq_2_ir_lock);
- return 0;
+
+ return rc;
}
int flush_irte(int irq)
{
+ int rc;
int index;
struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
index = irq_iommu->irte_index + irq_iommu->sub_handle;
- qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+ rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
spin_unlock(&irq_2_ir_lock);
- return 0;
+ return rc;
}
struct intel_iommu *map_ioapic_to_ir(int apic)
int free_irte(int irq)
{
+ int rc = 0;
int index, i;
struct irte *irte;
struct intel_iommu *iommu;
if (!irq_iommu->sub_handle) {
for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
set_64bit((unsigned long *)irte, 0);
- qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+ rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
irq_iommu->iommu = NULL;
spin_unlock(&irq_2_ir_lock);
- return 0;
+ return rc;
}
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
*/
struct irq_desc {
unsigned int irq;
--#ifdef CONFIG_SPARSE_IRQ
struct timer_rand_state *timer_rand_state;
unsigned int *kstat_irqs;
--# ifdef CONFIG_INTR_REMAP
++#ifdef CONFIG_INTR_REMAP
struct irq_2_iommu *irq_2_iommu;
--# endif
#endif
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
extern struct irq_desc irq_desc[NR_IRQS];
#else /* CONFIG_SPARSE_IRQ */
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
--
--#define kstat_irqs_this_cpu(DESC) \
-- ((DESC)->kstat_irqs[smp_processor_id()])
--#define kstat_incr_irqs_this_cpu(irqno, DESC) \
-- ((DESC)->kstat_irqs[smp_processor_id()]++)
--
#endif /* CONFIG_SPARSE_IRQ */
extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
* Migration helpers for obsolete names, they will go away:
*/
#define hw_interrupt_type irq_chip
- -typedef struct irq_chip hw_irq_controller;
#define no_irq_type no_irq_chip
typedef struct irq_desc irq_desc_t;
#include <asm/hw_irq.h>
extern int setup_irq(unsigned int irq, struct irqaction *new);
+ +extern void remove_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_GENERIC_HARDIRQS
}
/* Handle irq action chains: */
- -extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
+ +extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
/*
* Built-in IRQ handlers for various IRQ types,
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- - int action_ret);
+ + irqreturn_t action_ret);
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
-- unsigned long bytes;
-- char *ptr;
int node;
--
-- /* Compute how many bytes we need per irq and allocate them */
-- bytes = nr * sizeof(unsigned int);
++ void *ptr;
node = cpu_to_node(cpu);
-- ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
-- printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
++ ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
-- if (ptr)
-- desc->kstat_irqs = (unsigned int *)ptr;
++ /*
++ * don't overwite if can not get new one
++ * init_copy_kstat_irqs() could still use old one
++ */
++ if (ptr) {
++ printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
++ cpu, node);
++ desc->kstat_irqs = ptr;
++ }
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
}
};
++static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
struct irq_desc *desc;
desc = irq_desc;
count = ARRAY_SIZE(irq_desc);
-- for (i = 0; i < count; i++)
++ for (i = 0; i < count; i++) {
desc[i].irq = i;
++ desc[i].kstat_irqs = kstat_irqs_all[i];
++ }
return arch_early_irq_init();
}
}
#endif /* !CONFIG_SPARSE_IRQ */
++void clear_kstat_irqs(struct irq_desc *desc)
++{
++ memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
++}
++
/*
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this themself.
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;
+ + WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+ +
if (!(action->flags & IRQF_DISABLED))
local_irq_enable_in_hardirq();
}
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+ +
+ +#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+ +# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+ +#endif
+ +
/**
* __do_IRQ - original all in one highlevel IRQ handler
* @irq: the interrupt number
}
}
--#ifdef CONFIG_SPARSE_IRQ
/*
 * kstat_irqs_cpu - per-CPU interrupt count for one irq line
 * @irq: interrupt line number
 * @cpu: CPU whose counter to read
 *
 * Returns the number of times @irq fired on @cpu, or 0 if no
 * descriptor exists for @irq.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return 0;

	return desc->kstat_irqs[cpu];
}
--#endif
EXPORT_SYMBOL(kstat_irqs_cpu);
#include "internals.h"
- #ifdef CONFIG_SMP
+ #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
cpumask_var_t irq_default_affinity;
/**
/*
* Generic version of the affinity autoselector.
*/
--int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
++static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
if (!irq_can_set_affinity(irq))
return 0;
return 0;
}
#else
--static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
++static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
return irq_select_affinity(irq);
}
int ret;
spin_lock_irqsave(&desc->lock, flags);
-- ret = do_irq_select_affinity(irq, desc);
++ ret = setup_affinity(irq, desc);
spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
#else
--static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
++static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
return 0;
}
* allocate special interrupts that are part of the architecture.
*/
static int
- -__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
+ +__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
- - struct irqaction *old, **p;
+ + struct irqaction *old, **old_ptr;
const char *old_name = NULL;
unsigned long flags;
int shared = 0;
* The following block of code has to be executed atomically
*/
spin_lock_irqsave(&desc->lock, flags);
- - p = &desc->action;
- - old = *p;
+ + old_ptr = &desc->action;
+ + old = *old_ptr;
if (old) {
/*
* Can't share interrupts unless both agree to and are
/* add new interrupt at end of irq queue */
do {
- - p = &old->next;
- - old = *p;
+ + old_ptr = &old->next;
+ + old = *old_ptr;
} while (old);
shared = 1;
}
desc->status |= IRQ_NO_BALANCING;
/* Set default affinity mask once everything is setup */
-- do_irq_select_affinity(irq, desc);
++ setup_affinity(irq, desc);
} else if ((new->flags & IRQF_TRIGGER_MASK)
&& (new->flags & IRQF_TRIGGER_MASK)
(int)(new->flags & IRQF_TRIGGER_MASK));
}
- - *p = new;
+ + *old_ptr = new;
/* Reset broken irq detection when installing new handler */
desc->irq_count = 0;
return __setup_irq(irq, desc, act);
}
+ +EXPORT_SYMBOL_GPL(setup_irq);
- -/**
- - * free_irq - free an interrupt
- - * @irq: Interrupt line to free
- - * @dev_id: Device identity to free
- - *
- - * Remove an interrupt handler. The handler is removed and if the
- - * interrupt line is no longer in use by any driver it is disabled.
- - * On a shared IRQ the caller must ensure the interrupt is disabled
- - * on the card it drives before calling this function. The function
- - * does not return until any executing interrupts for this IRQ
- - * have completed.
- - *
- - * This function must not be called from interrupt context.
+ + /*
+ + * Internal function to unregister an irqaction - used to free
+ + * regular and special interrupts that are part of the architecture.
*/
- -void free_irq(unsigned int irq, void *dev_id)
+ +static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
- - struct irqaction **p;
+ + struct irqaction *action, **action_ptr;
unsigned long flags;
- - WARN_ON(in_interrupt());
+ + WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
if (!desc)
- - return;
+ + return NULL;
spin_lock_irqsave(&desc->lock, flags);
- - p = &desc->action;
+ +
+ + /*
+ + * There can be multiple actions per IRQ descriptor, find the right
+ + * one based on the dev_id:
+ + */
+ + action_ptr = &desc->action;
for (;;) {
- - struct irqaction *action = *p;
+ + action = *action_ptr;
- - if (action) {
- - struct irqaction **pp = p;
+ + if (!action) {
+ + WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ + spin_unlock_irqrestore(&desc->lock, flags);
- - p = &action->next;
- - if (action->dev_id != dev_id)
- - continue;
+ + return NULL;
+ + }
- - /* Found it - now remove it from the list of entries */
- - *pp = action->next;
+ + if (action->dev_id == dev_id)
+ + break;
+ + action_ptr = &action->next;
+ + }
- - /* Currently used only by UML, might disappear one day.*/
+ + /* Found it - now remove it from the list of entries: */
+ + *action_ptr = action->next;
+ +
+ + /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
- - if (desc->chip->release)
- - desc->chip->release(irq, dev_id);
+ + if (desc->chip->release)
+ + desc->chip->release(irq, dev_id);
#endif
- - if (!desc->action) {
- - desc->status |= IRQ_DISABLED;
- - if (desc->chip->shutdown)
- - desc->chip->shutdown(irq);
- - else
- - desc->chip->disable(irq);
- - }
- - spin_unlock_irqrestore(&desc->lock, flags);
- - unregister_handler_proc(irq, action);
+ + /* If this was the last handler, shut down the IRQ line: */
+ + if (!desc->action) {
+ + desc->status |= IRQ_DISABLED;
+ + if (desc->chip->shutdown)
+ + desc->chip->shutdown(irq);
+ + else
+ + desc->chip->disable(irq);
+ + }
+ + spin_unlock_irqrestore(&desc->lock, flags);
+ +
+ + unregister_handler_proc(irq, action);
+ +
+ + /* Make sure it's not being used on another CPU: */
+ + synchronize_irq(irq);
- - /* Make sure it's not being used on another CPU */
- - synchronize_irq(irq);
- -#ifdef CONFIG_DEBUG_SHIRQ
- - /*
- - * It's a shared IRQ -- the driver ought to be
- - * prepared for it to happen even now it's
- - * being freed, so let's make sure.... We do
- - * this after actually deregistering it, to
- - * make sure that a 'real' IRQ doesn't run in
- - * parallel with our fake
- - */
- - if (action->flags & IRQF_SHARED) {
- - local_irq_save(flags);
- - action->handler(irq, dev_id);
- - local_irq_restore(flags);
- - }
- -#endif
- - kfree(action);
- - return;
- - }
- - printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
#ifdef CONFIG_DEBUG_SHIRQ
- - dump_stack();
- -#endif
- - spin_unlock_irqrestore(&desc->lock, flags);
- - return;
+ + /*
+ + * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+ + * event to happen even now it's being freed, so let's make sure that
+ + * is so by doing an extra call to the handler ....
+ + *
+ + * ( We do this after actually deregistering it, to make sure that a
+ + * 'real' IRQ doesn't run in * parallel with our fake. )
+ + */
+ + if (action->flags & IRQF_SHARED) {
+ + local_irq_save(flags);
+ + action->handler(irq, dev_id);
+ + local_irq_restore(flags);
}
+ +#endif
+ + return action;
+ +}
+ +
+ +/**
+ + * remove_irq - free an interrupt
+ + * @irq: Interrupt line to free
+ + * @act: irqaction for the interrupt
+ + *
+ + * Used to remove interrupts statically setup by the early boot process.
+ + */
+ +void remove_irq(unsigned int irq, struct irqaction *act)
+ +{
+ + __free_irq(irq, act->dev_id);
+ +}
+ +EXPORT_SYMBOL_GPL(remove_irq);
+ +
+ +/**
+ + * free_irq - free an interrupt allocated with request_irq
+ + * @irq: Interrupt line to free
+ + * @dev_id: Device identity to free
+ + *
+ + * Remove an interrupt handler. The handler is removed and if the
+ + * interrupt line is no longer in use by any driver it is disabled.
+ + * On a shared IRQ the caller must ensure the interrupt is disabled
+ + * on the card it drives before calling this function. The function
+ + * does not return until any executing interrupts for this IRQ
+ + * have completed.
+ + *
+ + * This function must not be called from interrupt context.
+ + */
+ +void free_irq(unsigned int irq, void *dev_id)
+ +{
+ + kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);
* the behavior is classified as "will not fix" so we need to
* start nudging drivers away from using that idiom.
*/
- - if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
- - == (IRQF_SHARED|IRQF_DISABLED))
- - pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
- - "guaranteed on shared IRQs\n",
- - irq, devname);
+ + if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+ + (IRQF_SHARED|IRQF_DISABLED)) {
+ + pr_warning(
+ + "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
+ + irq, devname);
+ + }
#ifdef CONFIG_LOCKDEP
/*
if (!handler)
return -EINVAL;
- action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
- action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ + action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
action->flags = irqflags;
- - cpus_clear(action->mask);
action->name = devname;
- - action->next = NULL;
action->dev_id = dev_id;
retval = __setup_irq(irq, desc, action);