/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:       the interrupt number
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

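/*
 * Illustrative sketch (not part of the original file): because the
 * genirq layer hides the controller, a driver only deals in irq
 * numbers and handlers.  The handler, attach function and device name
 * below are made-up examples; request_irq() itself is the real API
 * from <linux/interrupt.h>, which is already included above.
 */
static irqreturn_t example_driver_handler(int irq, void *dev_id)
{
        /* Device-level handling only; controller ack/mask/unmask is
         * done by the generic code and the irq_chip behind this irq. */
        return IRQ_HANDLED;
}

static int __maybe_unused example_driver_attach(unsigned int irq)
{
        /* No controller-specific calls: the same code works whatever
         * irq_chip this line is wired to. */
        return request_irq(irq, example_driver_handler, 0,
                           "example-driver", NULL);
}
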
#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
        .chip       = &no_irq_chip,
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

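/*
 * Allocate the per-CPU interrupt counters for @desc on the memory node
 * of @cpu: @nr counters of one unsigned int each, read back later via
 * kstat_irqs_cpu() at the bottom of this file.
 */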
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
        unsigned long bytes;
        char *ptr;
        int node;

        /* Compute how many bytes we need per irq and allocate them */
        bytes = nr * sizeof(unsigned int);

        node = cpu_to_node(cpu);
        ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
        printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);

        if (ptr)
                desc->kstat_irqs = (unsigned int *)ptr;
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
        int node = cpu_to_node(cpu);

        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        spin_lock_init(&desc->lock);
        desc->irq = irq;
#ifdef CONFIG_SMP
        desc->cpu = cpu;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, cpu, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "cannot alloc kstat_irqs\n");
                BUG_ON(1);
        }
        if (!init_alloc_desc_masks(desc, node, false)) {
                printk(KERN_ERR "cannot alloc irq_desc cpumasks\n");
                BUG_ON(1);
        }
        arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .irq        = -1,
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
        }
};

/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];

int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int i;

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_legacy[i];
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                init_alloc_desc_masks(&desc[i], 0, true);
                irq_desc_ptrs[i] = desc + i;
        }

        for (i = legacy_count; i < NR_IRQS; i++)
                irq_desc_ptrs[i] = NULL;

        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        unsigned long flags;
        int node;

        if (irq >= NR_IRQS) {
                printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
                                irq, NR_IRQS);
                WARN_ON(1);
                return NULL;
        }

        desc = irq_desc_ptrs[irq];
        if (desc)
                return desc;

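        /*
         * The unlocked fast path above can race with this allocation,
         * so everything below runs under sparse_irq_lock; the pointer
         * is published in irq_desc_ptrs[] only after the descriptor is
         * fully initialized.
         */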
        spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];
        if (desc)
                goto out_unlock;

        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
                 irq, cpu, node);
        if (!desc) {
                printk(KERN_ERR "cannot alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, cpu);

        irq_desc_ptrs[irq] = desc;

out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status = IRQ_DISABLED,
                .chip = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq = i;
                init_alloc_desc_masks(&desc[i], 0, true);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        print_irq_desc(irq, desc);
        ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name           = "none",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = ack_bad,
        .end            = noop,
};
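
/*
 * Descriptors that no real controller has claimed point at no_irq_chip
 * (see irq_desc_init above): every operation is a no-op except .ack,
 * which routes a stray hardware event through ack_bad() so it gets
 * logged and acknowledged at the architecture level.
 */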

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name           = "dummy",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = noop,
        .mask           = noop,
        .unmask         = noop,
        .end            = noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:        the interrupt number
 * @action:     the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                ret = action->handler(irq, action->dev_id);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}
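
/*
 * Illustrative sketch (not part of the original file): on a line
 * registered with IRQF_SHARED, several irqactions hang off one irq and
 * handle_IRQ_event() above walks them all, OR-ing the return values.
 * A well-behaved shared handler therefore checks its own device first
 * and returns IRQ_NONE when the interrupt is not its own.  The struct,
 * register offset and accessors below are made-up assumptions
 * (readl/writel would come from <linux/io.h>, not included here).
 */
struct example_shared_dev {                     /* hypothetical device */
        void __iomem *regs;
};
#define EXAMPLE_IRQ_STAT        0x00            /* hypothetical register */

static irqreturn_t example_shared_handler(int irq, void *dev_id)
{
        struct example_shared_dev *dev = dev_id;

        if (!(readl(dev->regs + EXAMPLE_IRQ_STAT) & 0x1))
                return IRQ_NONE;        /* not ours; next action runs */

        writel(0x1, dev->regs + EXAMPLE_IRQ_STAT);      /* clear it */
        return IRQ_HANDLED;             /* folded into retval above */
}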

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:        the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack) {
                        desc->chip->ack(irq);
                        /* get new one */
                        desc = irq_remap_to_desc(irq, desc);
                }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->chip->end(irq);
                return 1;
        }

        spin_lock(&desc->lock);
        if (desc->chip->ack) {
                desc->chip->ack(irq);
                desc = irq_remap_to_desc(irq, desc);
        }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                spin_lock(&desc->lock);
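                /*
                 * Another instance of this irq may have arrived and set
                 * IRQ_PENDING while the handler ran with the lock
                 * dropped; if so, go round and handle it as well.
                 */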
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
#endif

void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}

#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);