#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
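
/*
 * Example (illustrative sketch, not part of this header; "foo_handler"
 * and "foo_dev" are hypothetical driver names): a driver for a device
 * whose interrupt line is falling-edge triggered passes the trigger
 * type along with its other flags:
 *
 *	err = request_irq(irq, foo_handler, IRQF_TRIGGER_FALLING,
 *			  "foo", foo_dev);
 *
 * Omitting all IRQF_TRIGGER_* bits leaves the line configured however
 * the machine or firmware already set it up.
 */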
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
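
/*
 * Example (sketch only; "foo_handler" and "foo_dev" are hypothetical):
 * IRQF_SHARED requires a unique, non-NULL dev_id cookie so the core can
 * tell the actions on one line apart, both when dispatching and at
 * free_irq() time:
 *
 *	err = request_irq(irq, foo_handler, IRQF_SHARED, "foo", foo_dev);
 *	...
 *	free_irq(irq, foo_dev);		// same cookie selects this action
 */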
/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED      - handler thread died
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,
	IRQTF_WARNED,
};
typedef irqreturn_t (*irq_handler_t)(int, void *);
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @mask:	no comment as it is useless and about to be removed
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
};
extern irqreturn_t no_action(int cpl, void *dev_id);

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
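
/*
 * Example of the split handler model (an illustrative sketch, not part
 * of this header; the foo_* names are hypothetical): the hard-irq
 * handler only checks whether its device raised the interrupt, and the
 * heavy lifting runs in the handler thread:
 *
 *	static irqreturn_t foo_check(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		foo_process_events(dev_id);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_check, foo_thread,
 *				   IRQF_SHARED, "foo", foo);
 *
 * Passing a NULL thread_fn, as request_irq() does, keeps the classic
 * hard-irq-only behaviour.
 */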
#ifdef CONFIG_GENERIC_HARDIRQS
extern void exit_irq_thread(void);
#else
static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);
extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
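
/*
 * Example (sketch under the usual driver-model assumptions; foo_probe()
 * and the foo_* types are hypothetical): the devm_ variants tie the IRQ
 * to the struct device lifetime, so the error and remove paths need no
 * explicit free_irq():
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *		int err;
 *
 *		err = devm_request_irq(dev, foo->irq, foo_handler, 0,
 *				       "foo", foo);
 *		if (err)
 *			return err;	// nothing to unwind
 *		...
 *	}
 */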
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
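
/*
 * Example (illustrative only; the foo_* names are hypothetical):
 * disable_irq() masks the line and waits for any running handler of it
 * to complete, so it must not be called while holding a lock that the
 * handler might take. A typical pattern around reconfiguration:
 *
 *	disable_irq(foo->irq);		// line masked, handlers drained
 *	foo_rewrite_registers(foo);
 *	enable_irq(foo->irq);
 *
 * disable_irq_nosync() returns without waiting, for contexts that could
 * deadlock waiting on the handler.
 */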
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
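
/*
 * Example (sketch only): pinning an interrupt to CPU 0 after checking
 * that the platform allows its affinity to be changed:
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(0));
 *
 * On uniprocessor configurations irq_can_set_affinity() is stubbed to
 * return 0 above, so the body is simply skipped.
 */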
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled, and which are the only
 * irq-context users of a lock, so that it's safe to take the
 * lock in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
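
/*
 * Example (illustrative; foo_suspend() and the foo_* names are
 * hypothetical): a device that should wake the system marks its
 * interrupt as a wakeup source while suspended:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 * The matching resume callback calls disable_irq_wake() to balance it.
 */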
#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
#endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}

#endif /* CONFIG_GENERIC_HARDIRQS */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif
/* PLEASE, avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};
/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
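
/*
 * Example (sketch only; FOO_SOFTIRQ stands in for one of the enum slots
 * above, since new softirqs are discouraged): the action is wired up
 * once during boot and raised later to request execution on this cpu:
 *
 *	static void foo_action(struct softirq_action *a)
 *	{
 *		... drain a per-cpu queue here ...
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_action);
 *
 *	raise_softirq(FOO_SOFTIRQ);		// irqs may be enabled
 *	raise_softirq_irqoff(FOO_SOFTIRQ);	// caller has irqs disabled
 */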
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);
/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not
     yet started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some inter-task
     synchronization, he does it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};
#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
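
/*
 * Example (illustrative sketch; the foo_* names are hypothetical): the
 * common pattern is a tasklet that the hard-irq handler kicks, with the
 * heavy work deferred to softirq context:
 *
 *	static void foo_do_work(unsigned long data)
 *	{
 *		struct foo_dev *foo = (struct foo_dev *)data;
 *		... process events queued by the irq handler ...
 *	}
 *
 *	tasklet_init(&foo->tasklet, foo_do_work, (unsigned long)foo);
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		tasklet_schedule(&foo->tasklet);
 *		return IRQ_HANDLED;
 *	}
 *
 * tasklet_kill(&foo->tasklet) must run before foo is freed.
 */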
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
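
/*
 * Example (sketch of the recipe above; the foo_* calls are hypothetical
 * device accessors):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	foo_mask_irq(foo);		// step 1: quiesce the device
 *	mask = probe_irq_on();
 *	foo_trigger_irq(foo);		// step 4: make it interrupt
 *	mdelay(20);			// step 5: give it time to fire
 *	irq = probe_irq_off(mask);	// 0 = none, negative = multiple
 *	foo_ack_irq(foo);		// step 7: clear the pending irq
 *
 * A result <= 0 means the probe failed and should be retried or abandoned.
 */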
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
extern void debug_poll_all_shared_irqs(void);
#else
static inline void debug_poll_all_shared_irqs(void) { }
#endif

int show_interrupts(struct seq_file *p, void *v);
struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int cpu);