2 * Intel IO-APIC support for multi-Pentium hosts.
4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/compiler.h>
31 #include <linux/acpi.h>
32 #include <linux/module.h>
33 #include <linux/sysdev.h>
34 #include <linux/msi.h>
35 #include <linux/htirq.h>
36 #include <linux/freezer.h>
37 #include <linux/kthread.h>
38 #include <linux/jiffies.h> /* time_after() */
40 #include <acpi/acpi_bus.h>
42 #include <linux/bootmem.h>
43 #include <linux/dmar.h>
44 #include <linux/hpet.h>
51 #include <asm/proto.h>
54 #include <asm/timer.h>
55 #include <asm/i8259.h>
57 #include <asm/msidef.h>
58 #include <asm/hypertransport.h>
59 #include <asm/setup.h>
60 #include <asm/irq_remapping.h>
62 #include <asm/uv/uv_hub.h>
63 #include <asm/uv/uv_irq.h>
65 #include <asm/genapic.h>
67 #define __apicdebuginit(type) static type __init
70 * Is the SiS APIC rmw bug present?
71 * -1 = don't know, 0 = no, 1 = yes
73 int sis_apic_bug = -1;
75 static DEFINE_SPINLOCK(ioapic_lock);
76 static DEFINE_SPINLOCK(vector_lock);
79 * # of IRQ routing registers
81 int nr_ioapic_registers[MAX_IO_APICS];
83 /* I/O APIC entries */
84 struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
87 /* MP IRQ source entries */
88 struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
90 /* # of MP IRQ source entries */
93 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
94 int mp_bus_id_to_type[MAX_MP_BUSSES];
97 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
99 int skip_ioapic_setup;
101 void arch_disable_smp_support(void)
105 noioapicreroute = -1;
107 skip_ioapic_setup = 1;
110 static int __init parse_noapic(char *str)
112 /* disable IO-APIC */
113 arch_disable_smp_support();
116 early_param("noapic", parse_noapic);
121 * This is performance-critical, we want to do it O(1)
123 * the indexing order of this array favors 1:1 mappings
124 * between pins and IRQs.
127 struct irq_pin_list {
129 struct irq_pin_list *next;
132 static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
134 struct irq_pin_list *pin;
137 node = cpu_to_node(cpu);
139 pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
145 struct irq_pin_list *irq_2_pin;
146 cpumask_var_t domain;
147 cpumask_var_t old_domain;
148 unsigned move_cleanup_count;
150 u8 move_in_progress : 1;
151 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
152 u8 move_desc_pending : 1;
156 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
157 #ifdef CONFIG_SPARSE_IRQ
158 static struct irq_cfg irq_cfgx[] = {
160 static struct irq_cfg irq_cfgx[NR_IRQS] = {
162 [0] = { .vector = IRQ0_VECTOR, },
163 [1] = { .vector = IRQ1_VECTOR, },
164 [2] = { .vector = IRQ2_VECTOR, },
165 [3] = { .vector = IRQ3_VECTOR, },
166 [4] = { .vector = IRQ4_VECTOR, },
167 [5] = { .vector = IRQ5_VECTOR, },
168 [6] = { .vector = IRQ6_VECTOR, },
169 [7] = { .vector = IRQ7_VECTOR, },
170 [8] = { .vector = IRQ8_VECTOR, },
171 [9] = { .vector = IRQ9_VECTOR, },
172 [10] = { .vector = IRQ10_VECTOR, },
173 [11] = { .vector = IRQ11_VECTOR, },
174 [12] = { .vector = IRQ12_VECTOR, },
175 [13] = { .vector = IRQ13_VECTOR, },
176 [14] = { .vector = IRQ14_VECTOR, },
177 [15] = { .vector = IRQ15_VECTOR, },
180 int __init arch_early_irq_init(void)
183 struct irq_desc *desc;
188 count = ARRAY_SIZE(irq_cfgx);
190 for (i = 0; i < count; i++) {
191 desc = irq_to_desc(i);
192 desc->chip_data = &cfg[i];
193 alloc_bootmem_cpumask_var(&cfg[i].domain);
194 alloc_bootmem_cpumask_var(&cfg[i].old_domain);
195 if (i < NR_IRQS_LEGACY)
196 cpumask_setall(cfg[i].domain);
202 #ifdef CONFIG_SPARSE_IRQ
203 static struct irq_cfg *irq_cfg(unsigned int irq)
205 struct irq_cfg *cfg = NULL;
206 struct irq_desc *desc;
208 desc = irq_to_desc(irq);
210 cfg = desc->chip_data;
215 static struct irq_cfg *get_one_free_irq_cfg(int cpu)
220 node = cpu_to_node(cpu);
222 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
224 if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
227 } else if (!alloc_cpumask_var_node(&cfg->old_domain,
229 free_cpumask_var(cfg->domain);
233 cpumask_clear(cfg->domain);
234 cpumask_clear(cfg->old_domain);
241 int arch_init_chip_data(struct irq_desc *desc, int cpu)
245 cfg = desc->chip_data;
247 desc->chip_data = get_one_free_irq_cfg(cpu);
248 if (!desc->chip_data) {
249 printk(KERN_ERR "can not alloc irq_cfg\n");
257 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
260 init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
262 struct irq_pin_list *old_entry, *head, *tail, *entry;
264 cfg->irq_2_pin = NULL;
265 old_entry = old_cfg->irq_2_pin;
269 entry = get_one_free_irq_2_pin(cpu);
273 entry->apic = old_entry->apic;
274 entry->pin = old_entry->pin;
277 old_entry = old_entry->next;
279 entry = get_one_free_irq_2_pin(cpu);
287 /* still use the old one */
290 entry->apic = old_entry->apic;
291 entry->pin = old_entry->pin;
294 old_entry = old_entry->next;
298 cfg->irq_2_pin = head;
301 static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
303 struct irq_pin_list *entry, *next;
305 if (old_cfg->irq_2_pin == cfg->irq_2_pin)
308 entry = old_cfg->irq_2_pin;
315 old_cfg->irq_2_pin = NULL;
318 void arch_init_copy_chip_data(struct irq_desc *old_desc,
319 struct irq_desc *desc, int cpu)
322 struct irq_cfg *old_cfg;
324 cfg = get_one_free_irq_cfg(cpu);
329 desc->chip_data = cfg;
331 old_cfg = old_desc->chip_data;
333 memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
335 init_copy_irq_2_pin(old_cfg, cfg, cpu);
338 static void free_irq_cfg(struct irq_cfg *old_cfg)
343 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
345 struct irq_cfg *old_cfg, *cfg;
347 old_cfg = old_desc->chip_data;
348 cfg = desc->chip_data;
354 free_irq_2_pin(old_cfg, cfg);
355 free_irq_cfg(old_cfg);
356 old_desc->chip_data = NULL;
361 set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
363 struct irq_cfg *cfg = desc->chip_data;
365 if (!cfg->move_in_progress) {
366 /* it means that domain is not changed */
367 if (!cpumask_intersects(desc->affinity, mask))
368 cfg->move_desc_pending = 1;
374 static struct irq_cfg *irq_cfg(unsigned int irq)
376 return irq < nr_irqs ? irq_cfgx + irq : NULL;
381 #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
383 set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
390 unsigned int unused[3];
394 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
396 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
397 + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
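/*
 * The IO-APIC is programmed through an indirect register window:
 * write the register number to 'index', then read or write 'data'.
 * Callers normally serialize the two steps with ioapic_lock.
 */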
400 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
402 struct io_apic __iomem *io_apic = io_apic_base(apic);
403 writel(reg, &io_apic->index);
404 return readl(&io_apic->data);
407 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
409 struct io_apic __iomem *io_apic = io_apic_base(apic);
410 writel(reg, &io_apic->index);
411 writel(value, &io_apic->data);
415 * Re-write a value: to be used for read-modify-write
416 * cycles where the read already set up the index register.
418 * Older SiS APICs require that we rewrite the index register
420 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
422 struct io_apic __iomem *io_apic = io_apic_base(apic);
424 if (sis_apic_bug)
425 writel(reg, &io_apic->index);
426 writel(value, &io_apic->data);
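/*
 * Returns true if any redirection entry behind this irq still has its
 * Remote IRR bit set, i.e. a level-triggered interrupt was delivered
 * but has not yet been EOI-ed by the local APIC.
 */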
429 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
431 struct irq_pin_list *entry;
434 spin_lock_irqsave(&ioapic_lock, flags);
435 entry = cfg->irq_2_pin;
443 reg = io_apic_read(entry->apic, 0x10 + pin*2);
444 /* Is the remote IRR bit set? */
445 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
446 spin_unlock_irqrestore(&ioapic_lock, flags);
453 spin_unlock_irqrestore(&ioapic_lock, flags);
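/*
 * union entry_union overlays the two raw 32-bit IO-APIC register words
 * (w1/w2) on top of a struct IO_APIC_route_entry, so an RTE can be read
 * or written as two register accesses.
 */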
459 struct { u32 w1, w2; };
460 struct IO_APIC_route_entry entry;
463 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
465 union entry_union eu;
467 spin_lock_irqsave(&ioapic_lock, flags);
468 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
469 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
470 spin_unlock_irqrestore(&ioapic_lock, flags);
475 * When we write a new IO APIC routing entry, we need to write the high
476 * word first! If the mask bit in the low word is clear, we will enable
477 * the interrupt, and we need to make sure the entry is fully populated
478 * before that happens.
481 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
483 union entry_union eu;
485 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
486 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
489 void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
492 spin_lock_irqsave(&ioapic_lock, flags);
493 __ioapic_write_entry(apic, pin, e);
494 spin_unlock_irqrestore(&ioapic_lock, flags);
498 * When we mask an IO APIC routing entry, we need to write the low
499 * word first, in order to set the mask bit before we change the high word.
502 static void ioapic_mask_entry(int apic, int pin)
505 union entry_union eu = { .entry.mask = 1 };
507 spin_lock_irqsave(&ioapic_lock, flags);
508 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
509 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
510 spin_unlock_irqrestore(&ioapic_lock, flags);
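/*
 * After an irq has been moved to a new vector/domain, IPI every online
 * CPU in the old domain so it can release its stale per-cpu vector in
 * the IRQ_MOVE_CLEANUP handler.
 */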
514 static void send_cleanup_vector(struct irq_cfg *cfg)
516 cpumask_var_t cleanup_mask;
518 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
520 cfg->move_cleanup_count = 0;
521 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
522 cfg->move_cleanup_count++;
523 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
524 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
526 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
527 cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
528 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
529 free_cpumask_var(cleanup_mask);
531 cfg->move_in_progress = 0;
534 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
537 struct irq_pin_list *entry;
538 u8 vector = cfg->vector;
540 entry = cfg->irq_2_pin;
549 #ifdef CONFIG_INTR_REMAP
551 * With interrupt-remapping, destination information comes
552 * from interrupt-remapping table entry.
554 if (!irq_remapped(irq))
555 io_apic_write(apic, 0x11 + pin*2, dest);
556 #else
557 io_apic_write(apic, 0x11 + pin*2, dest);
558 #endif
559 reg = io_apic_read(apic, 0x10 + pin*2);
560 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
562 io_apic_modify(apic, 0x10 + pin*2, reg);
570 assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
573 * Either sets desc->affinity to a valid value, and returns
574 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
575 * leaves desc->affinity untouched.
578 set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
583 if (!cpumask_intersects(mask, cpu_online_mask))
587 cfg = desc->chip_data;
588 if (assign_irq_vector(irq, cfg, mask))
591 cpumask_and(desc->affinity, cfg->domain, mask);
592 set_extra_move_desc(desc, mask);
594 return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
598 set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
606 cfg = desc->chip_data;
608 spin_lock_irqsave(&ioapic_lock, flags);
609 dest = set_desc_affinity(desc, mask);
610 if (dest != BAD_APICID) {
611 /* Only the high 8 bits are valid. */
612 dest = SET_APIC_LOGICAL_ID(dest);
613 __target_IO_APIC_irq(irq, dest, cfg);
615 spin_unlock_irqrestore(&ioapic_lock, flags);
619 set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
621 struct irq_desc *desc;
623 desc = irq_to_desc(irq);
625 set_ioapic_affinity_irq_desc(desc, mask);
627 #endif /* CONFIG_SMP */
630 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
631 * shared ISA-space IRQs, so we have to support them. We are super
632 * fast in the common case, and fast for shared ISA-space IRQs.
634 static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
636 struct irq_pin_list *entry;
638 entry = cfg->irq_2_pin;
640 entry = get_one_free_irq_2_pin(cpu);
642 printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
646 cfg->irq_2_pin = entry;
652 while (entry->next) {
653 /* not again, please */
654 if (entry->apic == apic && entry->pin == pin)
660 entry->next = get_one_free_irq_2_pin(cpu);
667 * Reroute an IRQ to a different pin.
669 static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
670 int oldapic, int oldpin,
671 int newapic, int newpin)
673 struct irq_pin_list *entry = cfg->irq_2_pin;
677 if (entry->apic == oldapic && entry->pin == oldpin) {
678 entry->apic = newapic;
681 /* every one is different, right? */
687 /* why? call replace before add? */
689 add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
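/*
 * Apply (reg & mask_and) | mask_or to the low word of every RTE behind
 * this irq; if 'final' is non-NULL it is called after each write, e.g.
 * to read the IO-APIC back and force the write to post.
 */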
692 static inline void io_apic_modify_irq(struct irq_cfg *cfg,
693 int mask_and, int mask_or,
694 void (*final)(struct irq_pin_list *entry))
697 struct irq_pin_list *entry;
699 for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
702 reg = io_apic_read(entry->apic, 0x10 + pin * 2);
705 io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
711 static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
713 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
717 static void io_apic_sync(struct irq_pin_list *entry)
720 * Synchronize the IO-APIC and the CPU by doing
721 * a dummy read from the IO-APIC
723 struct io_apic __iomem *io_apic;
724 io_apic = io_apic_base(entry->apic);
725 readl(&io_apic->data);
728 static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
730 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
732 #else /* CONFIG_X86_32 */
733 static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
735 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
738 static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
740 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
741 IO_APIC_REDIR_MASKED, NULL);
744 static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
746 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
747 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
749 #endif /* CONFIG_X86_32 */
751 static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
753 struct irq_cfg *cfg = desc->chip_data;
758 spin_lock_irqsave(&ioapic_lock, flags);
759 __mask_IO_APIC_irq(cfg);
760 spin_unlock_irqrestore(&ioapic_lock, flags);
763 static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
765 struct irq_cfg *cfg = desc->chip_data;
768 spin_lock_irqsave(&ioapic_lock, flags);
769 __unmask_IO_APIC_irq(cfg);
770 spin_unlock_irqrestore(&ioapic_lock, flags);
773 static void mask_IO_APIC_irq(unsigned int irq)
775 struct irq_desc *desc = irq_to_desc(irq);
777 mask_IO_APIC_irq_desc(desc);
779 static void unmask_IO_APIC_irq(unsigned int irq)
781 struct irq_desc *desc = irq_to_desc(irq);
783 unmask_IO_APIC_irq_desc(desc);
786 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
788 struct IO_APIC_route_entry entry;
790 /* Check delivery_mode to be sure we're not clearing an SMI pin */
791 entry = ioapic_read_entry(apic, pin);
792 if (entry.delivery_mode == dest_SMI)
795 * Disable it in the IO-APIC irq-routing table:
797 ioapic_mask_entry(apic, pin);
800 static void clear_IO_APIC (void)
804 for (apic = 0; apic < nr_ioapics; apic++)
805 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
806 clear_IO_APIC_pin(apic, pin);
811 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
812 * specific CPU-side IRQs.
816 static int pirq_entries [MAX_PIRQS];
817 static int pirqs_enabled;
819 static int __init ioapic_pirq_setup(char *str)
822 int ints[MAX_PIRQS+1];
824 get_options(str, ARRAY_SIZE(ints), ints);
826 for (i = 0; i < MAX_PIRQS; i++)
827 pirq_entries[i] = -1;
830 apic_printk(APIC_VERBOSE, KERN_INFO
831 "PIRQ redirection, working around broken MP-BIOS.\n");
833 if (ints[0] < MAX_PIRQS)
836 for (i = 0; i < max; i++) {
837 apic_printk(APIC_VERBOSE, KERN_DEBUG
838 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
840 * PIRQs are mapped upside down, usually.
842 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
847 __setup("pirq=", ioapic_pirq_setup);
848 #endif /* CONFIG_X86_32 */
850 #ifdef CONFIG_INTR_REMAP
851 /* I/O APIC RTE contents at the OS boot up */
852 static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
855 * Saves and masks all the unmasked IO-APIC RTEs
857 int save_mask_IO_APIC_setup(void)
859 union IO_APIC_reg_01 reg_01;
864 * The number of IO-APIC IRQ registers (== #pins):
866 for (apic = 0; apic < nr_ioapics; apic++) {
867 spin_lock_irqsave(&ioapic_lock, flags);
868 reg_01.raw = io_apic_read(apic, 1);
869 spin_unlock_irqrestore(&ioapic_lock, flags);
870 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
873 for (apic = 0; apic < nr_ioapics; apic++) {
874 early_ioapic_entries[apic] =
875 kzalloc(sizeof(struct IO_APIC_route_entry) *
876 nr_ioapic_registers[apic], GFP_KERNEL);
877 if (!early_ioapic_entries[apic])
881 for (apic = 0; apic < nr_ioapics; apic++)
882 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
883 struct IO_APIC_route_entry entry;
885 entry = early_ioapic_entries[apic][pin] =
886 ioapic_read_entry(apic, pin);
889 ioapic_write_entry(apic, pin, entry);
897 kfree(early_ioapic_entries[apic--]);
898 memset(early_ioapic_entries, 0,
899 ARRAY_SIZE(early_ioapic_entries));
904 void restore_IO_APIC_setup(void)
908 for (apic = 0; apic < nr_ioapics; apic++) {
909 if (!early_ioapic_entries[apic])
911 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
912 ioapic_write_entry(apic, pin,
913 early_ioapic_entries[apic][pin]);
914 kfree(early_ioapic_entries[apic]);
915 early_ioapic_entries[apic] = NULL;
919 void reinit_intr_remapped_IO_APIC(int intr_remapping)
922 * For now, plain restore of previous settings.
923 * TBD: when the OS enables interrupt-remapping, the
924 * IO-APIC RTEs need to be set up to point to interrupt-remapping
925 * table entries. For now, do a plain restore, and wait for
926 * setup_IO_APIC_irqs() to do the proper initialization.
928 restore_IO_APIC_setup();
933 * Find the IRQ entry number of a certain pin.
935 static int find_irq_entry(int apic, int pin, int type)
939 for (i = 0; i < mp_irq_entries; i++)
940 if (mp_irqs[i].irqtype == type &&
941 (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
942 mp_irqs[i].dstapic == MP_APIC_ALL) &&
943 mp_irqs[i].dstirq == pin)
950 * Find the pin to which IRQ[irq] (ISA) is connected
952 static int __init find_isa_irq_pin(int irq, int type)
956 for (i = 0; i < mp_irq_entries; i++) {
957 int lbus = mp_irqs[i].srcbus;
959 if (test_bit(lbus, mp_bus_not_pci) &&
960 (mp_irqs[i].irqtype == type) &&
961 (mp_irqs[i].srcbusirq == irq))
963 return mp_irqs[i].dstirq;
968 static int __init find_isa_irq_apic(int irq, int type)
972 for (i = 0; i < mp_irq_entries; i++) {
973 int lbus = mp_irqs[i].srcbus;
975 if (test_bit(lbus, mp_bus_not_pci) &&
976 (mp_irqs[i].irqtype == type) &&
977 (mp_irqs[i].srcbusirq == irq))
980 if (i < mp_irq_entries) {
982 for(apic = 0; apic < nr_ioapics; apic++) {
983 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
992 * Find a specific PCI IRQ entry.
993 * Not an __init, possibly needed by modules
995 static int pin_2_irq(int idx, int apic, int pin);
997 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
999 int apic, i, best_guess = -1;
1001 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
1003 if (test_bit(bus, mp_bus_not_pci)) {
1004 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
1007 for (i = 0; i < mp_irq_entries; i++) {
1008 int lbus = mp_irqs[i].srcbus;
1010 for (apic = 0; apic < nr_ioapics; apic++)
1011 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
1012 mp_irqs[i].dstapic == MP_APIC_ALL)
1015 if (!test_bit(lbus, mp_bus_not_pci) &&
1016 !mp_irqs[i].irqtype &&
1018 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
1019 int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
1021 if (!(apic || IO_APIC_IRQ(irq)))
1024 if (pin == (mp_irqs[i].srcbusirq & 3))
1027 * Use the first all-but-pin matching entry as a
1028 * best-guess fuzzy result for broken mptables.
1037 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1039 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1041 * EISA Edge/Level control register, ELCR
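 * (I/O ports 0x4d0/0x4d1, one bit per ISA IRQ; a set bit means level triggered)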
1043 static int EISA_ELCR(unsigned int irq)
1045 if (irq < NR_IRQS_LEGACY) {
1046 unsigned int port = 0x4d0 + (irq >> 3);
1047 return (inb(port) >> (irq & 7)) & 1;
1049 apic_printk(APIC_VERBOSE, KERN_INFO
1050 "Broken MPtable reports ISA irq %d\n", irq);
1056 /* ISA interrupts are always polarity zero edge triggered,
1057 * when listed as conforming in the MP table. */
1059 #define default_ISA_trigger(idx) (0)
1060 #define default_ISA_polarity(idx) (0)
1062 /* EISA interrupts are always polarity zero and can be edge or level
1063 * trigger depending on the ELCR value. If an interrupt is listed as
1064 * EISA conforming in the MP table, that means its trigger type must
1065 * be read in from the ELCR */
1067 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
1068 #define default_EISA_polarity(idx) default_ISA_polarity(idx)
1070 /* PCI interrupts are always polarity one level triggered,
1071 * when listed as conforming in the MP table. */
1073 #define default_PCI_trigger(idx) (1)
1074 #define default_PCI_polarity(idx) (1)
1076 /* MCA interrupts are always polarity zero level triggered,
1077 * when listed as conforming in the MP table. */
1079 #define default_MCA_trigger(idx) (1)
1080 #define default_MCA_polarity(idx) default_ISA_polarity(idx)
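/*
 * MP table irqflag encoding: bits 0-1 are the polarity, bits 2-3 the
 * trigger mode; a value of 0 means "conforms to the bus".
 */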
1082 static int MPBIOS_polarity(int idx)
1084 int bus = mp_irqs[idx].srcbus;
1088 * Determine IRQ line polarity (high active or low active):
1090 switch (mp_irqs[idx].irqflag & 3)
1092 case 0: /* conforms, ie. bus-type dependent polarity */
1093 if (test_bit(bus, mp_bus_not_pci))
1094 polarity = default_ISA_polarity(idx);
1096 polarity = default_PCI_polarity(idx);
1098 case 1: /* high active */
1103 case 2: /* reserved */
1105 printk(KERN_WARNING "broken BIOS!!\n");
1109 case 3: /* low active */
1114 default: /* invalid */
1116 printk(KERN_WARNING "broken BIOS!!\n");
1124 static int MPBIOS_trigger(int idx)
1126 int bus = mp_irqs[idx].srcbus;
1130 * Determine IRQ trigger mode (edge or level sensitive):
1132 switch ((mp_irqs[idx].irqflag>>2) & 3)
1134 case 0: /* conforms, ie. bus-type dependent */
1135 if (test_bit(bus, mp_bus_not_pci))
1136 trigger = default_ISA_trigger(idx);
1138 trigger = default_PCI_trigger(idx);
1139 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1140 switch (mp_bus_id_to_type[bus]) {
1141 case MP_BUS_ISA: /* ISA pin */
1143 /* set before the switch */
1146 case MP_BUS_EISA: /* EISA pin */
1148 trigger = default_EISA_trigger(idx);
1151 case MP_BUS_PCI: /* PCI pin */
1153 /* set before the switch */
1156 case MP_BUS_MCA: /* MCA pin */
1158 trigger = default_MCA_trigger(idx);
1163 printk(KERN_WARNING "broken BIOS!!\n");
1175 case 2: /* reserved */
1177 printk(KERN_WARNING "broken BIOS!!\n");
1186 default: /* invalid */
1188 printk(KERN_WARNING "broken BIOS!!\n");
1196 static inline int irq_polarity(int idx)
1198 return MPBIOS_polarity(idx);
1201 static inline int irq_trigger(int idx)
1203 return MPBIOS_trigger(idx);
1206 int (*ioapic_renumber_irq)(int ioapic, int irq);
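/*
 * Map an (ioapic, pin) pair to a global IRQ number: ISA pins keep the
 * bus irq from the MP table, PCI pins are numbered sequentially across
 * all IO-APICs.
 */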
1207 static int pin_2_irq(int idx, int apic, int pin)
1210 int bus = mp_irqs[idx].srcbus;
1213 * Debugging check, we are in big trouble if this message pops up!
1215 if (mp_irqs[idx].dstirq != pin)
1216 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1218 if (test_bit(bus, mp_bus_not_pci)) {
1219 irq = mp_irqs[idx].srcbusirq;
1222 * PCI IRQs are mapped in order
1226 irq += nr_ioapic_registers[i++];
1229 * For MPS mode, so far only needed by ES7000 platform
1231 if (ioapic_renumber_irq)
1232 irq = ioapic_renumber_irq(apic, irq);
1235 #ifdef CONFIG_X86_32
1237 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1239 if ((pin >= 16) && (pin <= 23)) {
1240 if (pirq_entries[pin-16] != -1) {
1241 if (!pirq_entries[pin-16]) {
1242 apic_printk(APIC_VERBOSE, KERN_DEBUG
1243 "disabling PIRQ%d\n", pin-16);
1245 irq = pirq_entries[pin-16];
1246 apic_printk(APIC_VERBOSE, KERN_DEBUG
1247 "using PIRQ%d -> IRQ %d\n",
1257 void lock_vector_lock(void)
1259 /* Used so that the online set of cpus does not change
1260 * during assign_irq_vector.
1262 spin_lock(&vector_lock);
1265 void unlock_vector_lock(void)
1267 spin_unlock(&vector_lock);
1271 __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1274 * NOTE! The local APIC isn't very good at handling
1275 * multiple interrupts at the same interrupt level.
1276 * As the interrupt level is determined by taking the
1277 * vector number and shifting that right by 4, we
1278 * want to spread these out a bit so that they don't
1279 * all fall in the same interrupt level.
1281 * Also, we've got to be careful not to trash gate
1282 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1284 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1285 unsigned int old_vector;
1287 cpumask_var_t tmp_mask;
1289 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1292 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
1295 old_vector = cfg->vector;
1297 cpumask_and(tmp_mask, mask, cpu_online_mask);
1298 cpumask_and(tmp_mask, cfg->domain, tmp_mask);
1299 if (!cpumask_empty(tmp_mask)) {
1300 free_cpumask_var(tmp_mask);
1305 /* Only try and allocate irqs on cpus that are present */
1307 for_each_cpu_and(cpu, mask, cpu_online_mask) {
1311 apic->vector_allocation_domain(cpu, tmp_mask);
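/*
 * Scan candidate vectors, skipping system vectors, anything reserved
 * in used_vectors and anything already assigned on a CPU in the
 * target domain.
 */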
1313 vector = current_vector;
1314 offset = current_offset;
1317 if (vector >= first_system_vector) {
1318 /* If out of vectors on large boxen, must share them. */
1319 offset = (offset + 1) % 8;
1320 vector = FIRST_DEVICE_VECTOR + offset;
1322 if (unlikely(current_vector == vector))
1325 if (test_bit(vector, used_vectors))
1328 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1329 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1332 current_vector = vector;
1333 current_offset = offset;
1335 cfg->move_in_progress = 1;
1336 cpumask_copy(cfg->old_domain, cfg->domain);
1338 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1339 per_cpu(vector_irq, new_cpu)[vector] = irq;
1340 cfg->vector = vector;
1341 cpumask_copy(cfg->domain, tmp_mask);
1345 free_cpumask_var(tmp_mask);
1350 assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1353 unsigned long flags;
1355 spin_lock_irqsave(&vector_lock, flags);
1356 err = __assign_irq_vector(irq, cfg, mask);
1357 spin_unlock_irqrestore(&vector_lock, flags);
1361 static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1365 BUG_ON(!cfg->vector);
1367 vector = cfg->vector;
1368 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
1369 per_cpu(vector_irq, cpu)[vector] = -1;
1372 cpumask_clear(cfg->domain);
1374 if (likely(!cfg->move_in_progress))
1376 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
1377 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1379 if (per_cpu(vector_irq, cpu)[vector] != irq)
1381 per_cpu(vector_irq, cpu)[vector] = -1;
1385 cfg->move_in_progress = 0;
1388 void __setup_vector_irq(int cpu)
1390 /* Initialize vector_irq on a new cpu */
1391 /* This function must be called with vector_lock held */
1393 struct irq_cfg *cfg;
1394 struct irq_desc *desc;
1396 /* Mark the inuse vectors */
1397 for_each_irq_desc(irq, desc) {
1398 cfg = desc->chip_data;
1399 if (!cpumask_test_cpu(cpu, cfg->domain))
1401 vector = cfg->vector;
1402 per_cpu(vector_irq, cpu)[vector] = irq;
1404 /* Mark the free vectors */
1405 for (vector = 0; vector < NR_VECTORS; ++vector) {
1406 irq = per_cpu(vector_irq, cpu)[vector];
1411 if (!cpumask_test_cpu(cpu, cfg->domain))
1412 per_cpu(vector_irq, cpu)[vector] = -1;
1416 static struct irq_chip ioapic_chip;
1417 #ifdef CONFIG_INTR_REMAP
1418 static struct irq_chip ir_ioapic_chip;
1421 #define IOAPIC_AUTO -1
1422 #define IOAPIC_EDGE 0
1423 #define IOAPIC_LEVEL 1
1425 #ifdef CONFIG_X86_32
1426 static inline int IO_APIC_irq_trigger(int irq)
1430 for (apic = 0; apic < nr_ioapics; apic++) {
1431 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1432 idx = find_irq_entry(apic, pin, mp_INT);
1433 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1434 return irq_trigger(idx);
1438 * nonexistent IRQs are edge default
1443 static inline int IO_APIC_irq_trigger(int irq)
1449 static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
1452 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1453 trigger == IOAPIC_LEVEL)
1454 desc->status |= IRQ_LEVEL;
1456 desc->status &= ~IRQ_LEVEL;
1458 #ifdef CONFIG_INTR_REMAP
1459 if (irq_remapped(irq)) {
1460 desc->status |= IRQ_MOVE_PCNTXT;
1462 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1466 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1467 handle_edge_irq, "edge");
1471 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1472 trigger == IOAPIC_LEVEL)
1473 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1477 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1478 handle_edge_irq, "edge");
1481 int setup_ioapic_entry(int apic_id, int irq,
1482 struct IO_APIC_route_entry *entry,
1483 unsigned int destination, int trigger,
1484 int polarity, int vector)
1487 * add it to the IO-APIC irq-routing table:
1489 memset(entry,0,sizeof(*entry));
1491 #ifdef CONFIG_INTR_REMAP
1492 if (intr_remapping_enabled) {
1493 struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
1495 struct IR_IO_APIC_route_entry *ir_entry =
1496 (struct IR_IO_APIC_route_entry *) entry;
1500 panic("No mapping iommu for ioapic %d\n", apic_id);
1502 index = alloc_irte(iommu, irq, 1);
1504 panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1506 memset(&irte, 0, sizeof(irte));
1509 irte.dst_mode = apic->irq_dest_mode;
1510 irte.trigger_mode = trigger;
1511 irte.dlvry_mode = apic->irq_delivery_mode;
1512 irte.vector = vector;
1513 irte.dest_id = IRTE_DEST(destination);
1515 modify_irte(irq, &irte);
1517 ir_entry->index2 = (index >> 15) & 0x1;
1519 ir_entry->format = 1;
1520 ir_entry->index = (index & 0x7fff);
1524 entry->delivery_mode = apic->irq_delivery_mode;
1525 entry->dest_mode = apic->irq_dest_mode;
1526 entry->dest = destination;
1529 entry->mask = 0; /* enable IRQ */
1530 entry->trigger = trigger;
1531 entry->polarity = polarity;
1532 entry->vector = vector;
1534 /* Mask level triggered irqs.
1535 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1542 static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
1543 int trigger, int polarity)
1545 struct irq_cfg *cfg;
1546 struct IO_APIC_route_entry entry;
1549 if (!IO_APIC_IRQ(irq))
1552 cfg = desc->chip_data;
1554 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1557 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
1559 apic_printk(APIC_VERBOSE,KERN_DEBUG
1560 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1561 "IRQ %d Mode:%i Active:%i)\n",
1562 apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
1563 irq, trigger, polarity);
1566 if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
1567 dest, trigger, polarity, cfg->vector)) {
1568 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1569 mp_ioapics[apic_id].apicid, pin);
1570 __clear_irq_vector(irq, cfg);
1574 ioapic_register_intr(irq, desc, trigger);
1575 if (irq < NR_IRQS_LEGACY)
1576 disable_8259A_irq(irq);
1578 ioapic_write_entry(apic_id, pin, entry);
1581 static void __init setup_IO_APIC_irqs(void)
1583 int apic_id, pin, idx, irq;
1585 struct irq_desc *desc;
1586 struct irq_cfg *cfg;
1587 int cpu = boot_cpu_id;
1589 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1591 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
1592 for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
1594 idx = find_irq_entry(apic_id, pin, mp_INT);
1598 apic_printk(APIC_VERBOSE,
1599 KERN_DEBUG " %d-%d",
1600 mp_ioapics[apic_id].apicid, pin);
1602 apic_printk(APIC_VERBOSE, " %d-%d",
1603 mp_ioapics[apic_id].apicid, pin);
1607 apic_printk(APIC_VERBOSE,
1608 " (apicid-pin) not connected\n");
1612 irq = pin_2_irq(idx, apic_id, pin);
1615 * Skip the timer IRQ if there's a quirk handler
1616 * installed and if it returns 1:
1618 if (apic->multi_timer_check &&
1619 apic->multi_timer_check(apic_id, irq))
1622 desc = irq_to_desc_alloc_cpu(irq, cpu);
1624 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1627 cfg = desc->chip_data;
1628 add_pin_to_irq_cpu(cfg, cpu, apic_id, pin);
1630 setup_IO_APIC_irq(apic_id, pin, irq, desc,
1631 irq_trigger(idx), irq_polarity(idx));
1636 apic_printk(APIC_VERBOSE,
1637 " (apicid-pin) not connected\n");
1641 * Set up the timer pin, possibly with the 8259A-master behind.
1643 static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
1646 struct IO_APIC_route_entry entry;
1648 #ifdef CONFIG_INTR_REMAP
1649 if (intr_remapping_enabled)
1653 memset(&entry, 0, sizeof(entry));
1656 * We use logical delivery to get the timer IRQ to the first CPU.
1659 entry.dest_mode = apic->irq_dest_mode;
1660 entry.mask = 0; /* don't mask IRQ for edge */
1661 entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
1662 entry.delivery_mode = apic->irq_delivery_mode;
1665 entry.vector = vector;
1668 * The timer IRQ doesn't have to know that behind the
1669 * scenes we may have an 8259A-master in AEOI mode ...
1671 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1674 * Add it to the IO-APIC irq-routing table:
1676 ioapic_write_entry(apic_id, pin, entry);
1680 __apicdebuginit(void) print_IO_APIC(void)
1683 union IO_APIC_reg_00 reg_00;
1684 union IO_APIC_reg_01 reg_01;
1685 union IO_APIC_reg_02 reg_02;
1686 union IO_APIC_reg_03 reg_03;
1687 unsigned long flags;
1688 struct irq_cfg *cfg;
1689 struct irq_desc *desc;
1692 if (apic_verbosity == APIC_QUIET)
1695 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1696 for (i = 0; i < nr_ioapics; i++)
1697 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1698 mp_ioapics[i].apicid, nr_ioapic_registers[i]);
1701 * We are a bit conservative about what we expect. We have to
1702 * know about every hardware change ASAP.
1704 printk(KERN_INFO "testing the IO APIC.......................\n");
1706 for (apic = 0; apic < nr_ioapics; apic++) {
1708 spin_lock_irqsave(&ioapic_lock, flags);
1709 reg_00.raw = io_apic_read(apic, 0);
1710 reg_01.raw = io_apic_read(apic, 1);
1711 if (reg_01.bits.version >= 0x10)
1712 reg_02.raw = io_apic_read(apic, 2);
1713 if (reg_01.bits.version >= 0x20)
1714 reg_03.raw = io_apic_read(apic, 3);
1715 spin_unlock_irqrestore(&ioapic_lock, flags);
1718 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
1719 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1720 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1721 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1722 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1724 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01);
1725 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1727 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1728 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1731 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1732 * but the value of reg_02 is read as the previous read register
1733 * value, so ignore it if reg_02 == reg_01.
1735 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1736 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1737 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1741 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1742 * or reg_03, but the value of reg_0[23] is read as the previous read
1743 * register value, so ignore it if reg_03 == reg_0[12].
1745 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1746 reg_03.raw != reg_01.raw) {
1747 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1748 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1751 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1753 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1754 " Stat Dmod Deli Vect: \n");
1756 for (i = 0; i <= reg_01.bits.entries; i++) {
1757 struct IO_APIC_route_entry entry;
1759 entry = ioapic_read_entry(apic, i);
1761 printk(KERN_DEBUG " %02x %03X ",
1766 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1771 entry.delivery_status,
1773 entry.delivery_mode,
1778 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1779 for_each_irq_desc(irq, desc) {
1780 struct irq_pin_list *entry;
1782 cfg = desc->chip_data;
1783 entry = cfg->irq_2_pin;
1786 printk(KERN_DEBUG "IRQ%d ", irq);
1788 printk("-> %d:%d", entry->apic, entry->pin);
1791 entry = entry->next;
1796 printk(KERN_INFO ".................................... done.\n");
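/*
 * Dump a 256-bit local APIC register array (ISR/TMR/IRR), read as
 * eight 32-bit registers, one character per bit.
 */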
1801 __apicdebuginit(void) print_APIC_bitfield(int base)
1806 if (apic_verbosity == APIC_QUIET)
1809 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1810 for (i = 0; i < 8; i++) {
1811 v = apic_read(base + i*0x10);
1812 for (j = 0; j < 32; j++) {
1822 __apicdebuginit(void) print_local_APIC(void *dummy)
1824 unsigned int v, ver, maxlvt;
1827 if (apic_verbosity == APIC_QUIET)
1830 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1831 smp_processor_id(), hard_smp_processor_id());
1832 v = apic_read(APIC_ID);
1833 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1834 v = apic_read(APIC_LVR);
1835 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1836 ver = GET_APIC_VERSION(v);
1837 maxlvt = lapic_get_maxlvt();
1839 v = apic_read(APIC_TASKPRI);
1840 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1842 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1843 if (!APIC_XAPIC(ver)) {
1844 v = apic_read(APIC_ARBPRI);
1845 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1846 v & APIC_ARBPRI_MASK);
1848 v = apic_read(APIC_PROCPRI);
1849 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1853 * Remote read supported only in the 82489DX and local APIC for
1854 * Pentium processors.
1856 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
1857 v = apic_read(APIC_RRR);
1858 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1861 v = apic_read(APIC_LDR);
1862 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1863 if (!x2apic_enabled()) {
1864 v = apic_read(APIC_DFR);
1865 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1867 v = apic_read(APIC_SPIV);
1868 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1870 printk(KERN_DEBUG "... APIC ISR field:\n");
1871 print_APIC_bitfield(APIC_ISR);
1872 printk(KERN_DEBUG "... APIC TMR field:\n");
1873 print_APIC_bitfield(APIC_TMR);
1874 printk(KERN_DEBUG "... APIC IRR field:\n");
1875 print_APIC_bitfield(APIC_IRR);
1877 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1878 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1879 apic_write(APIC_ESR, 0);
1881 v = apic_read(APIC_ESR);
1882 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1885 icr = apic_icr_read();
1886 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1887 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1889 v = apic_read(APIC_LVTT);
1890 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1892 if (maxlvt > 3) { /* PC is LVT#4. */
1893 v = apic_read(APIC_LVTPC);
1894 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1896 v = apic_read(APIC_LVT0);
1897 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1898 v = apic_read(APIC_LVT1);
1899 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1901 if (maxlvt > 2) { /* ERR is LVT#3. */
1902 v = apic_read(APIC_LVTERR);
1903 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1906 v = apic_read(APIC_TMICT);
1907 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1908 v = apic_read(APIC_TMCCT);
1909 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1910 v = apic_read(APIC_TDCR);
1911 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1915 __apicdebuginit(void) print_all_local_APICs(void)
1920 for_each_online_cpu(cpu)
1921 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1925 __apicdebuginit(void) print_PIC(void)
1928 unsigned long flags;
1930 if (apic_verbosity == APIC_QUIET)
1933 printk(KERN_DEBUG "\nprinting PIC contents\n");
1935 spin_lock_irqsave(&i8259A_lock, flags);
1937 v = inb(0xa1) << 8 | inb(0x21);
1938 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1940 v = inb(0xa0) << 8 | inb(0x20);
1941 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1945 v = inb(0xa0) << 8 | inb(0x20);
1949 spin_unlock_irqrestore(&i8259A_lock, flags);
1951 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1953 v = inb(0x4d1) << 8 | inb(0x4d0);
1954 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1957 __apicdebuginit(int) print_all_ICs(void)
1960 print_all_local_APICs();
1966 fs_initcall(print_all_ICs);
1969 /* Where, if anywhere, is the i8259 connected in external int mode */
1970 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1972 void __init enable_IO_APIC(void)
1974 union IO_APIC_reg_01 reg_01;
1975 int i8259_apic, i8259_pin;
1977 unsigned long flags;
1979 #ifdef CONFIG_X86_32
1982 for (i = 0; i < MAX_PIRQS; i++)
1983 pirq_entries[i] = -1;
1987 * The number of IO-APIC IRQ registers (== #pins):
1989 for (apic = 0; apic < nr_ioapics; apic++) {
1990 spin_lock_irqsave(&ioapic_lock, flags);
1991 reg_01.raw = io_apic_read(apic, 1);
1992 spin_unlock_irqrestore(&ioapic_lock, flags);
1993 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1995 for(apic = 0; apic < nr_ioapics; apic++) {
1997 /* See if any of the pins is in ExtINT mode */
1998 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1999 struct IO_APIC_route_entry entry;
2000 entry = ioapic_read_entry(apic, pin);
2002 /* If the interrupt line is enabled and in ExtInt mode
2003 * I have found the pin where the i8259 is connected.
2005 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
2006 ioapic_i8259.apic = apic;
2007 ioapic_i8259.pin = pin;
2013 /* Look to see if the MP table has reported the ExtINT */
2014 /* If we could not find the appropriate pin by looking at the ioapic,
2015 * the i8259 is probably not connected to the ioapic, but give the
2016 * mptable a chance anyway.
2018 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
2019 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
2020 /* Trust the MP table if nothing is setup in the hardware */
2021 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
2022 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
2023 ioapic_i8259.pin = i8259_pin;
2024 ioapic_i8259.apic = i8259_apic;
2026 /* Complain if the MP table and the hardware disagree */
2027 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
2028 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
2030 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
2034 * Do not trust the IO-APIC being empty at bootup
2040 * Not an __init, needed by the reboot code
2042 void disable_IO_APIC(void)
2045 * Clear the IO-APIC before rebooting:
2050 * If the i8259 is routed through an IOAPIC
2051 * Put that IOAPIC in virtual wire mode
2052 * so legacy interrupts can be delivered.
2054 if (ioapic_i8259.pin != -1) {
2055 struct IO_APIC_route_entry entry;
2057 memset(&entry, 0, sizeof(entry));
2058 entry.mask = 0; /* Enabled */
2059 entry.trigger = 0; /* Edge */
2061 entry.polarity = 0; /* High */
2062 entry.delivery_status = 0;
2063 entry.dest_mode = 0; /* Physical */
2064 entry.delivery_mode = dest_ExtINT; /* ExtInt */
2066 entry.dest = read_apic_id();
2069 * Add it to the IO-APIC irq-routing table:
2071 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
2074 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
2077 #ifdef CONFIG_X86_32
2079 * function to set the IO-APIC physical IDs based on the
2080 * values stored in the MPC table.
2082 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
2085 static void __init setup_ioapic_ids_from_mpc(void)
2087 union IO_APIC_reg_00 reg_00;
2088 physid_mask_t phys_id_present_map;
2091 unsigned char old_id;
2092 unsigned long flags;
2094 if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
2098 * Don't check I/O APIC IDs for xAPIC systems. They have
2099 * no meaning without the serial APIC bus.
2101 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2102 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
2105 * This is broken; anything with a real cpu count has to
2106 * circumvent this idiocy regardless.
2108 phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
2111 * Set the IOAPIC ID to the value stored in the MPC table.
2113 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2115 /* Read the register 0 value */
2116 spin_lock_irqsave(&ioapic_lock, flags);
2117 reg_00.raw = io_apic_read(apic_id, 0);
2118 spin_unlock_irqrestore(&ioapic_lock, flags);
2120 old_id = mp_ioapics[apic_id].apicid;
2122 if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
2123 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2124 apic_id, mp_ioapics[apic_id].apicid);
2125 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2127 mp_ioapics[apic_id].apicid = reg_00.bits.ID;
2131 * Sanity check, is the ID really free? Every APIC in a
2132 * system must have a unique ID or we get lots of nice
2133 * 'stuck on smp_invalidate_needed IPI wait' messages.
2135 if (apic->check_apicid_used(phys_id_present_map,
2136 mp_ioapics[apic_id].apicid)) {
2137 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2138 apic_id, mp_ioapics[apic_id].apicid);
2139 for (i = 0; i < get_physical_broadcast(); i++)
2140 if (!physid_isset(i, phys_id_present_map))
2142 if (i >= get_physical_broadcast())
2143 panic("Max APIC ID exceeded!\n");
2144 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2146 physid_set(i, phys_id_present_map);
2147 mp_ioapics[apic_id].apicid = i;
2150 tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
2151 apic_printk(APIC_VERBOSE, "Setting %d in the "
2152 "phys_id_present_map\n",
2153 mp_ioapics[apic_id].apicid);
2154 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2159 * We need to adjust the IRQ routing table
2160 * if the ID changed.
2162 if (old_id != mp_ioapics[apic_id].apicid)
2163 for (i = 0; i < mp_irq_entries; i++)
2164 if (mp_irqs[i].dstapic == old_id)
2166 = mp_ioapics[apic_id].apicid;
2169 * Read the right value from the MPC table and
2170 * write it into the ID register.
2172 apic_printk(APIC_VERBOSE, KERN_INFO
2173 "...changing IO-APIC physical APIC ID to %d ...",
2174 mp_ioapics[apic_id].apicid);
2176 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2177 spin_lock_irqsave(&ioapic_lock, flags);
2178 io_apic_write(apic_id, 0, reg_00.raw);
2179 spin_unlock_irqrestore(&ioapic_lock, flags);
2184 spin_lock_irqsave(&ioapic_lock, flags);
2185 reg_00.raw = io_apic_read(apic_id, 0);
2186 spin_unlock_irqrestore(&ioapic_lock, flags);
2187 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2188 printk("could not set ID!\n");
2190 apic_printk(APIC_VERBOSE, " ok.\n");
2195 int no_timer_check __initdata;
2197 static int __init notimercheck(char *s)
2202 __setup("no_timer_check", notimercheck);
2205 * There is a nasty bug in some older SMP boards: their mptable lies
2206 * about the timer IRQ. We do the following to work around the situation:
2208 * - timer IRQ defaults to IO-APIC IRQ
2209 * - if this function detects that timer IRQs are defunct, then we fall
2210 * back to ISA timer IRQs
2212 static int __init timer_irq_works(void)
2214 unsigned long t1 = jiffies;
2215 unsigned long flags;
2220 local_save_flags(flags);
2222 /* Let ten ticks pass... */
2223 mdelay((10 * 1000) / HZ);
2224 local_irq_restore(flags);
2227 * Expect a few ticks at least, to be sure some possible
2228 * glue logic does not lock up after the first one or two
2229 * ticks in a non-ExtINT mode. Also the local APIC
2230 * might have cached one ExtINT interrupt. Finally, at
2231 * least one tick may be lost due to delays.
2235 if (time_after(jiffies, t1 + 4))
2241 * In the SMP+IOAPIC case it might happen that there are an unspecified
2242 * number of pending IRQ events unhandled. These cases are very rare,
2243 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2244 * better to do it this way, as we then do not have to be aware of
2245 * 'pending' interrupts in the IRQ path, except at this point.
2248 * Edge triggered needs to resend any interrupt
2249 * that was delayed but this is now handled in the device
2254 * Starting up an edge-triggered IO-APIC interrupt is
2255 * nasty - we need to make sure that we get the edge.
2256 * If it is already asserted for some reason, we need
2257 * return 1 to indicate that it was pending.
2259 * This is not complete - we should be able to fake
2260 * an edge even if it isn't on the 8259A...
2263 static unsigned int startup_ioapic_irq(unsigned int irq)
2265 int was_pending = 0;
2266 unsigned long flags;
2267 struct irq_cfg *cfg;
2269 spin_lock_irqsave(&ioapic_lock, flags);
2270 if (irq < NR_IRQS_LEGACY) {
2271 disable_8259A_irq(irq);
2272 if (i8259A_irq_pending(irq))
2276 __unmask_IO_APIC_irq(cfg);
2277 spin_unlock_irqrestore(&ioapic_lock, flags);
2282 #ifdef CONFIG_X86_64
2283 static int ioapic_retrigger_irq(unsigned int irq)
2286 struct irq_cfg *cfg = irq_cfg(irq);
2287 unsigned long flags;
2289 spin_lock_irqsave(&vector_lock, flags);
2290 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2291 spin_unlock_irqrestore(&vector_lock, flags);
2296 static int ioapic_retrigger_irq(unsigned int irq)
2298 apic->send_IPI_self(irq_cfg(irq)->vector);
2305 * Level and edge triggered IO-APIC interrupts need different handling,
2306 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2307 * handled with the level-triggered descriptor, but that one has slightly
2308 * more overhead. Level-triggered interrupts cannot be handled with the
2309 * edge-triggered handler without risking IRQ storms and other ugly races.
2315 #ifdef CONFIG_INTR_REMAP
2316 static void ir_irq_migration(struct work_struct *work);
2318 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2321 * Migrate the IO-APIC irq in the presence of intr-remapping.
2323 * For edge triggered, irq migration is a simple atomic update(of vector
2324 * and cpu destination) of IRTE and flush the hardware cache.
2326 * For level triggered, we need to modify the io-apic RTE as well with the updated
2327 * vector information, along with modifying the IRTE with the vector and destination.
2328 * So irq migration for level triggered is a little more complex compared to
2329 * edge triggered migration. But the good news is that we use the same algorithm
2330 * for level triggered migration as we have today, the only difference being that
2331 * we now initiate the irq migration from process context instead of
2332 * interrupt context.
2334 * In the future, when we do a directed EOI (combined with cpu EOI broadcast
2335 * suppression) to the IO-APIC, level triggered irq migration will also be
2336 * as simple as edge triggered migration and we can do the irq migration
2337 * with a simple atomic update to IO-APIC RTE.
2340 migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2342 struct irq_cfg *cfg;
2344 int modify_ioapic_rte;
2346 unsigned long flags;
2349 if (!cpumask_intersects(mask, cpu_online_mask))
2353 if (get_irte(irq, &irte))
2356 cfg = desc->chip_data;
2357 if (assign_irq_vector(irq, cfg, mask))
2360 set_extra_move_desc(desc, mask);
2362 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2364 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2365 if (modify_ioapic_rte) {
2366 spin_lock_irqsave(&ioapic_lock, flags);
2367 __target_IO_APIC_irq(irq, dest, cfg);
2368 spin_unlock_irqrestore(&ioapic_lock, flags);
2371 irte.vector = cfg->vector;
2372 irte.dest_id = IRTE_DEST(dest);
2375 * Modified the IRTE and flushes the Interrupt entry cache.
2377 modify_irte(irq, &irte);
2379 if (cfg->move_in_progress)
2380 send_cleanup_vector(cfg);
2382 cpumask_copy(desc->affinity, mask);
2385 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2388 struct irq_cfg *cfg = desc->chip_data;
2390 mask_IO_APIC_irq_desc(desc);
2392 if (io_apic_level_ack_pending(cfg)) {
2394 * Interrupt in progress. Migrating irq now will change the
2395 * vector information in the IO-APIC RTE and that will confuse
2396 * the EOI broadcast performed by the cpu.
2397 * So, delay the irq migration to the next instance.
2399 schedule_delayed_work(&ir_migration_work, 1);
2403 /* everything is clear, we have right of way */
2404 migrate_ioapic_irq_desc(desc, desc->pending_mask);
2407 desc->status &= ~IRQ_MOVE_PENDING;
2408 cpumask_clear(desc->pending_mask);
2411 unmask_IO_APIC_irq_desc(desc);
2416 static void ir_irq_migration(struct work_struct *work)
2419 struct irq_desc *desc;
2421 for_each_irq_desc(irq, desc) {
2422 if (desc->status & IRQ_MOVE_PENDING) {
2423 unsigned long flags;
2425 spin_lock_irqsave(&desc->lock, flags);
2426 if (!desc->chip->set_affinity ||
2427 !(desc->status & IRQ_MOVE_PENDING)) {
2428 desc->status &= ~IRQ_MOVE_PENDING;
2429 spin_unlock_irqrestore(&desc->lock, flags);
2433 desc->chip->set_affinity(irq, desc->pending_mask);
2434 spin_unlock_irqrestore(&desc->lock, flags);
2440 * Migrates the IRQ destination in the process context.
2442 static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2443 const struct cpumask *mask)
2445 if (desc->status & IRQ_LEVEL) {
2446 desc->status |= IRQ_MOVE_PENDING;
2447 cpumask_copy(desc->pending_mask, mask);
2448 migrate_irq_remapped_level_desc(desc);
2452 migrate_ioapic_irq_desc(desc, mask);
2454 static void set_ir_ioapic_affinity_irq(unsigned int irq,
2455 const struct cpumask *mask)
2457 struct irq_desc *desc = irq_to_desc(irq);
2459 set_ir_ioapic_affinity_irq_desc(desc, mask);
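/*
 * IPI handler sent by send_cleanup_vector(): runs on CPUs in the old
 * domain of a migrated irq and frees any vector on this CPU that no
 * longer matches the irq's current vector/domain.
 */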
2463 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2465 unsigned vector, me;
2471 me = smp_processor_id();
2472 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2474 struct irq_desc *desc;
2475 struct irq_cfg *cfg;
2476 irq = __get_cpu_var(vector_irq)[vector];
2481 desc = irq_to_desc(irq);
2486 spin_lock(&desc->lock);
2487 if (!cfg->move_cleanup_count)
2490 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2493 __get_cpu_var(vector_irq)[vector] = -1;
2494 cfg->move_cleanup_count--;
2496 spin_unlock(&desc->lock);
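/*
 * Called from the interrupt path: once the irq actually fires on its
 * new vector/CPU we know the migration has taken effect, so trigger
 * cleanup of the vectors still held in the old domain.
 */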
2502 static void irq_complete_move(struct irq_desc **descp)
2504 struct irq_desc *desc = *descp;
2505 struct irq_cfg *cfg = desc->chip_data;
2506 unsigned vector, me;
2508 if (likely(!cfg->move_in_progress)) {
2509 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
2510 if (likely(!cfg->move_desc_pending))
2513 /* domain has not changed, but affinity did */
2514 me = smp_processor_id();
2515 if (cpumask_test_cpu(me, desc->affinity)) {
2516 *descp = desc = move_irq_desc(desc, me);
2517 /* get the new one */
2518 cfg = desc->chip_data;
2519 cfg->move_desc_pending = 0;
2525 vector = ~get_irq_regs()->orig_ax;
2526 me = smp_processor_id();
2528 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
2529 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
2530 *descp = desc = move_irq_desc(desc, me);
2531 /* get the new one */
2532 cfg = desc->chip_data;
2534 send_cleanup_vector(cfg);
2538 static inline void irq_complete_move(struct irq_desc **descp) {}
2541 #ifdef CONFIG_INTR_REMAP
2542 static void ack_x2apic_level(unsigned int irq)
2547 static void ack_x2apic_edge(unsigned int irq)
2554 static void ack_apic_edge(unsigned int irq)
2556 struct irq_desc *desc = irq_to_desc(irq);
2558 irq_complete_move(&desc);
2559 move_native_irq(irq);
2563 atomic_t irq_mis_count;
2565 static void ack_apic_level(unsigned int irq)
2567 struct irq_desc *desc = irq_to_desc(irq);
2569 #ifdef CONFIG_X86_32
2573 struct irq_cfg *cfg;
2574 int do_unmask_irq = 0;
2576 irq_complete_move(&desc);
2577 #ifdef CONFIG_GENERIC_PENDING_IRQ
2578 /* If we are moving the irq we need to mask it */
2579 if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2581 mask_IO_APIC_irq_desc(desc);
2585 #ifdef CONFIG_X86_32
2587 * It appears there is an erratum which affects at least version 0x11
2588 * of I/O APIC (that's the 82093AA and cores integrated into various
2589 * chipsets). Under certain conditions a level-triggered interrupt is
2590 * erroneously delivered as an edge-triggered one but the respective IRR
2591 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2592 * message but it will never arrive and further interrupts are blocked
2593 * from the source. The exact reason is so far unknown, but the
2594 * phenomenon was observed when two consecutive interrupt requests
2595 * from a given source get delivered to the same CPU and the source is
2596 * temporarily disabled in between.
2598 * A workaround is to simulate an EOI message manually. We achieve it
2599 * by setting the trigger mode to edge and then to level when the edge
2600 * trigger mode gets detected in the TMR of a local APIC for a
2601 * level-triggered interrupt. We mask the source for the time of the
2602 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2603 * The idea is from Manfred Spraul. --macro
2605 cfg = desc->chip_data;
2608 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2612 * We must acknowledge the irq before we move it or the acknowledge will
2613 * not propagate properly.
2617 /* Now we can move and re-enable the irq */
2618 if (unlikely(do_unmask_irq)) {
2619 /* Only migrate the irq if the ack has been received.
2621 * On rare occasions the broadcast level triggered ack gets
2622 * delayed going to ioapics, and if we reprogram the
2623 * vector while Remote IRR is still set the irq will never fire again.
2626 * To prevent this scenario we read the Remote IRR bit
2627 * of the ioapic. This has two effects.
2628 * - On any sane system the read of the ioapic will
2629 * flush writes (and acks) going to the ioapic from this cpu.
2631 * - We get to see if the ACK has actually been delivered.
2633 * Based on failed experiments of reprogramming the
2634 * ioapic entry from outside of irq context starting
2635 * with masking the ioapic entry and then polling until
2636 * Remote IRR was clear before reprogramming the
2637 * ioapic I don't trust the Remote IRR bit to be
2638 * completely accurate.
2640 * However there appears to be no other way to plug
2641 * this race, so if the Remote IRR bit is not
2642 * accurate and is causing problems then it is a hardware bug
2643 * and you can go talk to the chipset vendor about it.
2645 cfg = desc->chip_data;
2646 if (!io_apic_level_ack_pending(cfg))
2647 move_masked_irq(irq);
2648 unmask_IO_APIC_irq_desc(desc);
2651 #ifdef CONFIG_X86_32
2652 if (!(v & (1 << (i & 0x1f)))) {
2653 atomic_inc(&irq_mis_count);
2654 spin_lock(&ioapic_lock);
2655 __mask_and_edge_IO_APIC_irq(cfg);
2656 __unmask_and_level_IO_APIC_irq(cfg);
2657 spin_unlock(&ioapic_lock);
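/*
 * The irq_chip the generic IRQ layer uses for IO-APIC pins: edge-triggered
 * interrupts are acknowledged via .ack (ack_apic_edge), level-triggered
 * ones via .eoi (ack_apic_level), selected by the edge/fasteoi flow
 * handlers installed at setup time.
 */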
2662 static struct irq_chip ioapic_chip __read_mostly = {
2664 .startup = startup_ioapic_irq,
2665 .mask = mask_IO_APIC_irq,
2666 .unmask = unmask_IO_APIC_irq,
2667 .ack = ack_apic_edge,
2668 .eoi = ack_apic_level,
2670 .set_affinity = set_ioapic_affinity_irq,
2672 .retrigger = ioapic_retrigger_irq,
2675 #ifdef CONFIG_INTR_REMAP
2676 static struct irq_chip ir_ioapic_chip __read_mostly = {
2677 .name = "IR-IO-APIC",
2678 .startup = startup_ioapic_irq,
2679 .mask = mask_IO_APIC_irq,
2680 .unmask = unmask_IO_APIC_irq,
2681 .ack = ack_x2apic_edge,
2682 .eoi = ack_x2apic_level,
2684 .set_affinity = set_ir_ioapic_affinity_irq,
2686 .retrigger = ioapic_retrigger_irq,
2690 static inline void init_IO_APIC_traps(void)
2693 struct irq_desc *desc;
2694 struct irq_cfg *cfg;
2697 * NOTE! The local APIC isn't very good at handling
2698 * multiple interrupts at the same interrupt level.
2699 * As the interrupt level is determined by taking the
2700 * vector number and shifting that right by 4, we
2701 * want to spread these out a bit so that they don't
2702 * all fall in the same interrupt level.
2704 * Also, we've got to be careful not to trash gate
2705 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2707 for_each_irq_desc(irq, desc) {
2708 cfg = desc->chip_data;
2709 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2711 * Hmm.. We don't have an entry for this,
2712 * so default to an old-fashioned 8259
2713 * interrupt if we can..
2715 if (irq < NR_IRQS_LEGACY)
2716 make_8259A_irq(irq);
2718 /* Strange. Oh, well.. */
2719 desc->chip = &no_irq_chip;
2725 * The local APIC irq-chip implementation:
2728 static void mask_lapic_irq(unsigned int irq)
2732 v = apic_read(APIC_LVT0);
2733 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2736 static void unmask_lapic_irq(unsigned int irq)
2740 v = apic_read(APIC_LVT0);
2741 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2744 static void ack_lapic_irq(unsigned int irq)
2749 static struct irq_chip lapic_chip __read_mostly = {
2750 .name = "local-APIC",
2751 .mask = mask_lapic_irq,
2752 .unmask = unmask_lapic_irq,
2753 .ack = ack_lapic_irq,
2756 static void lapic_register_intr(int irq, struct irq_desc *desc)
2758 desc->status &= ~IRQ_LEVEL;
2759 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2763 static void __init setup_nmi(void)
2766 * Dirty trick to enable the NMI watchdog ...
2767 * We put the 8259A master into AEOI mode and
2768 * unmask on all local APICs LVT0 as NMI.
2770 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2771 * is from Maciej W. Rozycki - so we do not have to EOI from
2772 * the NMI handler or the timer interrupt.
2774 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2776 enable_NMI_through_LVT0();
2778 apic_printk(APIC_VERBOSE, " done.\n");
2782 * This looks a bit hackish but it's about the only way of sending
2783 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2784 * not support the ExtINT mode, unfortunately. We need to send these
2785 * cycles as some i82489DX-based boards have glue logic that keeps the
2786 * 8259A interrupt line asserted until INTA. --macro
2788 static inline void __init unlock_ExtINT_logic(void)
2791 struct IO_APIC_route_entry entry0, entry1;
2792 unsigned char save_control, save_freq_select;
2794 pin = find_isa_irq_pin(8, mp_INT);
2799 apic = find_isa_irq_apic(8, mp_INT);
2805 entry0 = ioapic_read_entry(apic, pin);
2806 clear_IO_APIC_pin(apic, pin);
2808 memset(&entry1, 0, sizeof(entry1));
2810 entry1.dest_mode = 0; /* physical delivery */
2811 entry1.mask = 0; /* unmask IRQ now */
2812 entry1.dest = hard_smp_processor_id();
2813 entry1.delivery_mode = dest_ExtINT;
2814 entry1.polarity = entry0.polarity;
2818 ioapic_write_entry(apic, pin, entry1);
2820 save_control = CMOS_READ(RTC_CONTROL);
2821 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2822 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2824 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2829 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2833 CMOS_WRITE(save_control, RTC_CONTROL);
2834 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2835 clear_IO_APIC_pin(apic, pin);
2837 ioapic_write_entry(apic, pin, entry0);
2840 static int disable_timer_pin_1 __initdata;
2841 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2842 static int __init disable_timer_pin_setup(char *arg)
2844 disable_timer_pin_1 = 1;
2847 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2849 int timer_through_8259 __initdata;
2852 * This code may look a bit paranoid, but it's supposed to cooperate with
2853 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2854 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2855 * fanatically on his truly buggy board.
2857 * FIXME: really need to revamp this for all platforms.
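/*
 * The fallback ladder tried below, in order: IRQ0 routed straight through
 * the IO-APIC pin reported for the timer (pin1), then through the
 * 8259A-cascade pin (pin2), then as a local APIC "virtual wire" interrupt,
 * and finally as a plain ExtINT. If none of these produce timer ticks we
 * panic, since nothing else can work without a timer interrupt.
 */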
2859 static inline void __init check_timer(void)
2861 struct irq_desc *desc = irq_to_desc(0);
2862 struct irq_cfg *cfg = desc->chip_data;
2863 int cpu = boot_cpu_id;
2864 int apic1, pin1, apic2, pin2;
2865 unsigned long flags;
2868 local_irq_save(flags);
2871 * get/set the timer IRQ vector:
2873 disable_8259A_irq(0);
2874 assign_irq_vector(0, cfg, apic->target_cpus());
2877 * As IRQ0 is to be enabled in the 8259A, the virtual
2878 * wire has to be disabled in the local APIC. Also
2879 * timer interrupts need to be acknowledged manually in
2880 * the 8259A for the i82489DX when using the NMI
2881 * watchdog as that APIC treats NMIs as level-triggered.
2882 * The AEOI mode will finish them in the 8259A automatically.
2885 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2887 #ifdef CONFIG_X86_32
2891 ver = apic_read(APIC_LVR);
2892 ver = GET_APIC_VERSION(ver);
2893 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2897 pin1 = find_isa_irq_pin(0, mp_INT);
2898 apic1 = find_isa_irq_apic(0, mp_INT);
2899 pin2 = ioapic_i8259.pin;
2900 apic2 = ioapic_i8259.apic;
2902 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2903 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2904 cfg->vector, apic1, pin1, apic2, pin2);
2907 * Some BIOS writers are clueless and report the ExtINTA
2908 * I/O APIC input from the cascaded 8259A as the timer
2909 * interrupt input. So just in case, if only one pin
2910 * was found above, try it both directly and through the 8259A.
2914 #ifdef CONFIG_INTR_REMAP
2915 if (intr_remapping_enabled)
2916 panic("BIOS bug: timer not connected to IO-APIC");
2921 } else if (pin2 == -1) {
2928 * Ok, does IRQ0 through the IOAPIC work?
2931 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
2932 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2934 /* for an edge-triggered timer, setup_IO_APIC_irq already
2935 * leaves it unmasked,
2936 * so we only need to unmask it if it is level-triggered.
2937 * do we really have a level-triggered timer?
2940 idx = find_irq_entry(apic1, pin1, mp_INT);
2941 if (idx != -1 && irq_trigger(idx))
2942 unmask_IO_APIC_irq_desc(desc);
2944 if (timer_irq_works()) {
2945 if (nmi_watchdog == NMI_IO_APIC) {
2947 enable_8259A_irq(0);
2949 if (disable_timer_pin_1 > 0)
2950 clear_IO_APIC_pin(0, pin1);
2953 #ifdef CONFIG_INTR_REMAP
2954 if (intr_remapping_enabled)
2955 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2957 local_irq_disable();
2958 clear_IO_APIC_pin(apic1, pin1);
2960 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2961 "8254 timer not connected to IO-APIC\n");
2963 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2964 "(IRQ0) through the 8259A ...\n");
2965 apic_printk(APIC_QUIET, KERN_INFO
2966 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2968 * legacy devices should be connected to IO APIC #0
2970 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
2971 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2972 enable_8259A_irq(0);
2973 if (timer_irq_works()) {
2974 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2975 timer_through_8259 = 1;
2976 if (nmi_watchdog == NMI_IO_APIC) {
2977 disable_8259A_irq(0);
2979 enable_8259A_irq(0);
2984 * Cleanup, just in case ...
2986 local_irq_disable();
2987 disable_8259A_irq(0);
2988 clear_IO_APIC_pin(apic2, pin2);
2989 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2992 if (nmi_watchdog == NMI_IO_APIC) {
2993 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2994 "through the IO-APIC - disabling NMI Watchdog!\n");
2995 nmi_watchdog = NMI_NONE;
2997 #ifdef CONFIG_X86_32
3001 apic_printk(APIC_QUIET, KERN_INFO
3002 "...trying to set up timer as Virtual Wire IRQ...\n");
3004 lapic_register_intr(0, desc);
3005 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
3006 enable_8259A_irq(0);
3008 if (timer_irq_works()) {
3009 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3012 local_irq_disable();
3013 disable_8259A_irq(0);
3014 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3015 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
3017 apic_printk(APIC_QUIET, KERN_INFO
3018 "...trying to set up timer as ExtINT IRQ...\n");
3022 apic_write(APIC_LVT0, APIC_DM_EXTINT);
3024 unlock_ExtINT_logic();
3026 if (timer_irq_works()) {
3027 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3030 local_irq_disable();
3031 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
3032 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
3033 "report. Then try booting with the 'noapic' option.\n");
3035 local_irq_restore(flags);
3039 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
3040 * to devices. However there may be an I/O APIC pin available for
3041 * this interrupt regardless. The pin may be left unconnected, but
3042 * typically it will be reused as an ExtINT cascade interrupt for
3043 * the master 8259A. In the MPS case such a pin will normally be
3044 * reported as an ExtINT interrupt in the MP table. With ACPI
3045 * there is no provision for ExtINT interrupts, and in the absence
3046 * of an override it would be treated as an ordinary ISA I/O APIC
3047 * interrupt, that is edge-triggered and unmasked by default. We
3048 * used to do this, but it caused problems on some systems because
3049 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
3050 * the same ExtINT cascade interrupt to drive the local APIC of the
3051 * bootstrap processor. Therefore we refrain from routing IRQ2 to
3052 * the I/O APIC in all cases now. No actual device should request
3053 * it anyway. --macro
3055 #define PIC_IRQS (1 << PIC_CASCADE_IR)
3057 void __init setup_IO_APIC(void)
3060 #ifdef CONFIG_X86_32
3064 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
3068 io_apic_irqs = ~PIC_IRQS;
3070 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
3072 * Set up IO-APIC IRQ routing.
3074 #ifdef CONFIG_X86_32
3076 setup_ioapic_ids_from_mpc();
3079 setup_IO_APIC_irqs();
3080 init_IO_APIC_traps();
3085 * Called after all the initialization is done. If we didn't find any
3086 * APIC bugs then we can allow the modify fast path
3089 static int __init io_apic_bug_finalize(void)
3091 if (sis_apic_bug == -1)
3096 late_initcall(io_apic_bug_finalize);
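/*
 * Suspend/resume support: the IO-APIC loses its redirection table across
 * a suspend, so ioapic_suspend() snapshots every RTE into per-IOAPIC
 * sysdev data and ioapic_resume() rewrites the APIC ID register and all
 * entries on the way back up.
 */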
3098 struct sysfs_ioapic_data {
3099 struct sys_device dev;
3100 struct IO_APIC_route_entry entry[0];
3102 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
3104 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
3106 struct IO_APIC_route_entry *entry;
3107 struct sysfs_ioapic_data *data;
3110 data = container_of(dev, struct sysfs_ioapic_data, dev);
3111 entry = data->entry;
3112 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
3113 *entry = ioapic_read_entry(dev->id, i);
3118 static int ioapic_resume(struct sys_device *dev)
3120 struct IO_APIC_route_entry *entry;
3121 struct sysfs_ioapic_data *data;
3122 unsigned long flags;
3123 union IO_APIC_reg_00 reg_00;
3126 data = container_of(dev, struct sysfs_ioapic_data, dev);
3127 entry = data->entry;
3129 spin_lock_irqsave(&ioapic_lock, flags);
3130 reg_00.raw = io_apic_read(dev->id, 0);
3131 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3132 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3133 io_apic_write(dev->id, 0, reg_00.raw);
3135 spin_unlock_irqrestore(&ioapic_lock, flags);
3136 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3137 ioapic_write_entry(dev->id, i, entry[i]);
3142 static struct sysdev_class ioapic_sysdev_class = {
3144 .suspend = ioapic_suspend,
3145 .resume = ioapic_resume,
3148 static int __init ioapic_init_sysfs(void)
3150 struct sys_device * dev;
3153 error = sysdev_class_register(&ioapic_sysdev_class);
3157 for (i = 0; i < nr_ioapics; i++ ) {
3158 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
3159 * sizeof(struct IO_APIC_route_entry);
3160 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
3161 if (!mp_ioapic_data[i]) {
3162 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3165 dev = &mp_ioapic_data[i]->dev;
3167 dev->cls = &ioapic_sysdev_class;
3168 error = sysdev_register(dev);
3170 kfree(mp_ioapic_data[i]);
3171 mp_ioapic_data[i] = NULL;
3172 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3180 device_initcall(ioapic_init_sysfs);
3182 static int nr_irqs_gsi = NR_IRQS_LEGACY;
3184 * Dynamic irq allocation and deallocation
3186 unsigned int create_irq_nr(unsigned int irq_want)
3188 /* Allocate an unused irq */
3191 unsigned long flags;
3192 struct irq_cfg *cfg_new = NULL;
3193 int cpu = boot_cpu_id;
3194 struct irq_desc *desc_new = NULL;
3197 if (irq_want < nr_irqs_gsi)
3198 irq_want = nr_irqs_gsi;
3200 spin_lock_irqsave(&vector_lock, flags);
3201 for (new = irq_want; new < nr_irqs; new++) {
3202 desc_new = irq_to_desc_alloc_cpu(new, cpu);
3204 printk(KERN_INFO "can not get irq_desc for %d\n", new);
3207 cfg_new = desc_new->chip_data;
3209 if (cfg_new->vector != 0)
3211 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
3215 spin_unlock_irqrestore(&vector_lock, flags);
3218 dynamic_irq_init(irq);
3219 /* restore it, in case dynamic_irq_init clears it */
3221 desc_new->chip_data = cfg_new;
3226 int create_irq(void)
3228 unsigned int irq_want;
3231 irq_want = nr_irqs_gsi;
3232 irq = create_irq_nr(irq_want);
3240 void destroy_irq(unsigned int irq)
3242 unsigned long flags;
3243 struct irq_cfg *cfg;
3244 struct irq_desc *desc;
3246 /* store it, in case dynamic_irq_cleanup clears it */
3247 desc = irq_to_desc(irq);
3248 cfg = desc->chip_data;
3249 dynamic_irq_cleanup(irq);
3250 /* connect back irq_cfg */
3252 desc->chip_data = cfg;
3254 #ifdef CONFIG_INTR_REMAP
3257 spin_lock_irqsave(&vector_lock, flags);
3258 __clear_irq_vector(irq, cfg);
3259 spin_unlock_irqrestore(&vector_lock, flags);
3263 * MSI message composition
3265 #ifdef CONFIG_PCI_MSI
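/*
 * Compose the MSI address/data pair for an irq: address_lo carries the
 * destination APIC ID plus the physical/logical and redirection-hint
 * bits, while the data word carries the vector, the (fixed or
 * lowest-priority) delivery mode and edge trigger. With interrupt
 * remapping enabled the message instead encodes an IRTE index and the
 * real routing lives in the remapping table.
 */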
3266 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
3268 struct irq_cfg *cfg;
3276 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3280 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3282 #ifdef CONFIG_INTR_REMAP
3283 if (irq_remapped(irq)) {
3288 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3289 BUG_ON(ir_index == -1);
3291 memset(&irte, 0, sizeof(irte));
3294 irte.dst_mode = apic->irq_dest_mode;
3295 irte.trigger_mode = 0; /* edge */
3296 irte.dlvry_mode = apic->irq_delivery_mode;
3297 irte.vector = cfg->vector;
3298 irte.dest_id = IRTE_DEST(dest);
3300 modify_irte(irq, &irte);
3302 msg->address_hi = MSI_ADDR_BASE_HI;
3303 msg->data = sub_handle;
3304 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3306 MSI_ADDR_IR_INDEX1(ir_index) |
3307 MSI_ADDR_IR_INDEX2(ir_index);
3311 msg->address_hi = MSI_ADDR_BASE_HI;
3314 ((apic->irq_dest_mode == 0) ?
3315 MSI_ADDR_DEST_MODE_PHYSICAL:
3316 MSI_ADDR_DEST_MODE_LOGICAL) |
3317 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3318 MSI_ADDR_REDIRECTION_CPU:
3319 MSI_ADDR_REDIRECTION_LOWPRI) |
3320 MSI_ADDR_DEST_ID(dest);
3323 MSI_DATA_TRIGGER_EDGE |
3324 MSI_DATA_LEVEL_ASSERT |
3325 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3326 MSI_DATA_DELIVERY_FIXED:
3327 MSI_DATA_DELIVERY_LOWPRI) |
3328 MSI_DATA_VECTOR(cfg->vector);
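/*
 * Retargeting an MSI means rewriting the message stored in the device's
 * MSI capability: pick a vector/destination for the new mask, then patch
 * the vector into the data word and the destination APIC ID into
 * address_lo before writing the message back.
 */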
3334 static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3336 struct irq_desc *desc = irq_to_desc(irq);
3337 struct irq_cfg *cfg;
3341 dest = set_desc_affinity(desc, mask);
3342 if (dest == BAD_APICID)
3345 cfg = desc->chip_data;
3347 read_msi_msg_desc(desc, &msg);
3349 msg.data &= ~MSI_DATA_VECTOR_MASK;
3350 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3351 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3352 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3354 write_msi_msg_desc(desc, &msg);
3356 #ifdef CONFIG_INTR_REMAP
3358 * Migrate the MSI irq to another cpumask. This migration is
3359 * done in the process context using interrupt-remapping hardware.
3362 ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3364 struct irq_desc *desc = irq_to_desc(irq);
3365 struct irq_cfg *cfg = desc->chip_data;
3369 if (get_irte(irq, &irte))
3372 dest = set_desc_affinity(desc, mask);
3373 if (dest == BAD_APICID)
3376 irte.vector = cfg->vector;
3377 irte.dest_id = IRTE_DEST(dest);
3380 * atomically update the IRTE with the new destination and vector.
3382 modify_irte(irq, &irte);
3385 * After this point, all the interrupts will start arriving
3386 * at the new destination. So, time to cleanup the previous
3387 * vector allocation.
3389 if (cfg->move_in_progress)
3390 send_cleanup_vector(cfg);
3394 #endif /* CONFIG_SMP */
3397 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3398 * which implement the MSI or MSI-X Capability Structure.
3400 static struct irq_chip msi_chip = {
3402 .unmask = unmask_msi_irq,
3403 .mask = mask_msi_irq,
3404 .ack = ack_apic_edge,
3406 .set_affinity = set_msi_irq_affinity,
3408 .retrigger = ioapic_retrigger_irq,
3411 #ifdef CONFIG_INTR_REMAP
3412 static struct irq_chip msi_ir_chip = {
3413 .name = "IR-PCI-MSI",
3414 .unmask = unmask_msi_irq,
3415 .mask = mask_msi_irq,
3416 .ack = ack_x2apic_edge,
3418 .set_affinity = ir_set_msi_irq_affinity,
3420 .retrigger = ioapic_retrigger_irq,
3424 * Map the PCI dev to the corresponding remapping hardware unit
3425 * and allocate 'nvec' consecutive interrupt-remapping table entries
3428 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3430 struct intel_iommu *iommu;
3433 iommu = map_dev_to_ir(dev);
3436 "Unable to map PCI %s to iommu\n", pci_name(dev));
3440 index = alloc_irte(iommu, irq, nvec);
3443 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3451 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3456 ret = msi_compose_msg(dev, irq, &msg);
3460 set_irq_msi(irq, msidesc);
3461 write_msi_msg(irq, &msg);
3463 #ifdef CONFIG_INTR_REMAP
3464 if (irq_remapped(irq)) {
3465 struct irq_desc *desc = irq_to_desc(irq);
3467 * irq migration in process context
3469 desc->status |= IRQ_MOVE_PCNTXT;
3470 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3473 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3475 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
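/*
 * Allocate one irq (and one MSI message) per msi_desc on the device. With
 * interrupt remapping enabled, a consecutive block of IRTEs is reserved
 * while handling the first entry and the later MSI-X entries map onto it
 * via their sub_handle.
 */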
3480 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3483 int ret, sub_handle;
3484 struct msi_desc *msidesc;
3485 unsigned int irq_want;
3487 #ifdef CONFIG_INTR_REMAP
3488 struct intel_iommu *iommu = NULL;
3492 irq_want = nr_irqs_gsi;
3494 list_for_each_entry(msidesc, &dev->msi_list, list) {
3495 irq = create_irq_nr(irq_want);
3499 #ifdef CONFIG_INTR_REMAP
3500 if (!intr_remapping_enabled)
3505 * allocate the consecutive block of IRTE's
3508 index = msi_alloc_irte(dev, irq, nvec);
3514 iommu = map_dev_to_ir(dev);
3520 * setup the mapping between the irq and the IRTE
3521 * base index, the sub_handle pointing to the
3522 * appropriate interrupt remap table entry.
3524 set_irte_irq(irq, iommu, index, sub_handle);
3528 ret = setup_msi_irq(dev, msidesc, irq);
3540 void arch_teardown_msi_irq(unsigned int irq)
3547 static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3549 struct irq_desc *desc = irq_to_desc(irq);
3550 struct irq_cfg *cfg;
3554 dest = set_desc_affinity(desc, mask);
3555 if (dest == BAD_APICID)
3558 cfg = desc->chip_data;
3560 dmar_msi_read(irq, &msg);
3562 msg.data &= ~MSI_DATA_VECTOR_MASK;
3563 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3564 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3565 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3567 dmar_msi_write(irq, &msg);
3570 #endif /* CONFIG_SMP */
3572 struct irq_chip dmar_msi_type = {
3574 .unmask = dmar_msi_unmask,
3575 .mask = dmar_msi_mask,
3576 .ack = ack_apic_edge,
3578 .set_affinity = dmar_msi_set_affinity,
3580 .retrigger = ioapic_retrigger_irq,
3583 int arch_setup_dmar_msi(unsigned int irq)
3588 ret = msi_compose_msg(NULL, irq, &msg);
3591 dmar_msi_write(irq, &msg);
3592 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3598 #ifdef CONFIG_HPET_TIMER
3601 static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3603 struct irq_desc *desc = irq_to_desc(irq);
3604 struct irq_cfg *cfg;
3608 dest = set_desc_affinity(desc, mask);
3609 if (dest == BAD_APICID)
3612 cfg = desc->chip_data;
3614 hpet_msi_read(irq, &msg);
3616 msg.data &= ~MSI_DATA_VECTOR_MASK;
3617 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3618 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3619 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3621 hpet_msi_write(irq, &msg);
3624 #endif /* CONFIG_SMP */
3626 struct irq_chip hpet_msi_type = {
3628 .unmask = hpet_msi_unmask,
3629 .mask = hpet_msi_mask,
3630 .ack = ack_apic_edge,
3632 .set_affinity = hpet_msi_set_affinity,
3634 .retrigger = ioapic_retrigger_irq,
3637 int arch_setup_hpet_msi(unsigned int irq)
3642 ret = msi_compose_msg(NULL, irq, &msg);
3646 hpet_msi_write(irq, &msg);
3647 set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
3654 #endif /* CONFIG_PCI_MSI */
3656 * Hypertransport interrupt support
3658 #ifdef CONFIG_HT_IRQ
3662 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3664 struct ht_irq_msg msg;
3665 fetch_ht_irq_msg(irq, &msg);
3667 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3668 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3670 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3671 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3673 write_ht_irq_msg(irq, &msg);
3676 static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
3678 struct irq_desc *desc = irq_to_desc(irq);
3679 struct irq_cfg *cfg;
3682 dest = set_desc_affinity(desc, mask);
3683 if (dest == BAD_APICID)
3686 cfg = desc->chip_data;
3688 target_ht_irq(irq, dest, cfg->vector);
3693 static struct irq_chip ht_irq_chip = {
3695 .mask = mask_ht_irq,
3696 .unmask = unmask_ht_irq,
3697 .ack = ack_apic_edge,
3699 .set_affinity = set_ht_irq_affinity,
3701 .retrigger = ioapic_retrigger_irq,
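/*
 * Compose and write the HyperTransport interrupt message: vector and
 * destination APIC ID in address_lo/address_hi, physical vs. logical
 * destination mode and fixed vs. arbitrated delivery chosen from the
 * current genapic, with the interrupt initially masked.
 */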
3704 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3706 struct irq_cfg *cfg;
3713 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3715 struct ht_irq_msg msg;
3718 dest = apic->cpu_mask_to_apicid_and(cfg->domain,
3719 apic->target_cpus());
3721 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3725 HT_IRQ_LOW_DEST_ID(dest) |
3726 HT_IRQ_LOW_VECTOR(cfg->vector) |
3727 ((apic->irq_dest_mode == 0) ?
3728 HT_IRQ_LOW_DM_PHYSICAL :
3729 HT_IRQ_LOW_DM_LOGICAL) |
3730 HT_IRQ_LOW_RQEOI_EDGE |
3731 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3732 HT_IRQ_LOW_MT_FIXED :
3733 HT_IRQ_LOW_MT_ARBITRATED) |
3734 HT_IRQ_LOW_IRQ_MASKED;
3736 write_ht_irq_msg(irq, &msg);
3738 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3739 handle_edge_irq, "edge");
3741 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3745 #endif /* CONFIG_HT_IRQ */
3747 #ifdef CONFIG_X86_UV
3749 * Re-target the irq to the specified CPU and enable the specified MMR located
3750 * on the specified blade to allow the sending of MSIs to the specified CPU.
3752 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3753 unsigned long mmr_offset)
3755 const struct cpumask *eligible_cpu = cpumask_of(cpu);
3756 struct irq_cfg *cfg;
3758 unsigned long mmr_value;
3759 struct uv_IO_APIC_route_entry *entry;
3760 unsigned long flags;
3765 err = assign_irq_vector(irq, cfg, eligible_cpu);
3769 spin_lock_irqsave(&vector_lock, flags);
3770 set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
3772 spin_unlock_irqrestore(&vector_lock, flags);
3775 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3776 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3778 entry->vector = cfg->vector;
3779 entry->delivery_mode = apic->irq_delivery_mode;
3780 entry->dest_mode = apic->irq_dest_mode;
3781 entry->polarity = 0;
3784 entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
3786 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3787 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3793 * Disable the specified MMR located on the specified blade so that MSIs are
3794 * no longer allowed to be sent.
3796 void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
3798 unsigned long mmr_value;
3799 struct uv_IO_APIC_route_entry *entry;
3803 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3804 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3808 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3809 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3811 #endif /* CONFIG_X86_UV */
3813 int __init io_apic_get_redir_entries(int ioapic)
3815 union IO_APIC_reg_01 reg_01;
3816 unsigned long flags;
3818 spin_lock_irqsave(&ioapic_lock, flags);
3819 reg_01.raw = io_apic_read(ioapic, 1);
3820 spin_unlock_irqrestore(&ioapic_lock, flags);
3822 return reg_01.bits.entries;
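/*
 * Establish how many GSIs the machine has: prefer the count ACPI reports,
 * otherwise sum the redirection-entry counts of all IO-APICs. nr_irqs_gsi
 * is later used as the floor for dynamically created (MSI/HT) irq numbers.
 */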
3825 void __init probe_nr_irqs_gsi(void)
3829 nr = acpi_probe_gsi();
3830 if (nr > nr_irqs_gsi) {
3833 /* for acpi=off or acpi is not compiled in */
3837 for (idx = 0; idx < nr_ioapics; idx++)
3838 nr += io_apic_get_redir_entries(idx) + 1;
3840 if (nr > nr_irqs_gsi)
3844 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3847 #ifdef CONFIG_SPARSE_IRQ
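/*
 * Size nr_irqs for the sparse irq case: start from the GSI count plus a
 * few irqs per CPU, add headroom for MSI and HT dynamic irqs, and never
 * exceed NR_VECTORS * nr_cpu_ids.
 */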
3848 int __init arch_probe_nr_irqs(void)
3852 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3853 nr_irqs = NR_VECTORS * nr_cpu_ids;
3855 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3856 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3858 * for MSI and HT dyn irq
3860 nr += nr_irqs_gsi * 16;
3869 /* --------------------------------------------------------------------------
3870 ACPI-based IOAPIC Configuration
3871 -------------------------------------------------------------------------- */
3875 #ifdef CONFIG_X86_32
3876 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3878 union IO_APIC_reg_00 reg_00;
3879 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3881 unsigned long flags;
3885 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3886 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3887 * support up to 16 on one shared APIC bus.
3889 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3890 * advantage of new APIC bus architecture.
3893 if (physids_empty(apic_id_map))
3894 apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
3896 spin_lock_irqsave(&ioapic_lock, flags);
3897 reg_00.raw = io_apic_read(ioapic, 0);
3898 spin_unlock_irqrestore(&ioapic_lock, flags);
3900 if (apic_id >= get_physical_broadcast()) {
3901 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3902 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3903 apic_id = reg_00.bits.ID;
3907 * Every APIC in a system must have a unique ID or we get lots of nice
3908 * 'stuck on smp_invalidate_needed IPI wait' messages.
3910 if (apic->check_apicid_used(apic_id_map, apic_id)) {
3912 for (i = 0; i < get_physical_broadcast(); i++) {
3913 if (!apic->check_apicid_used(apic_id_map, i))
3917 if (i == get_physical_broadcast())
3918 panic("Max apic_id exceeded!\n");
3920 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3921 "trying %d\n", ioapic, apic_id, i);
3926 tmp = apic->apicid_to_cpu_present(apic_id);
3927 physids_or(apic_id_map, apic_id_map, tmp);
3929 if (reg_00.bits.ID != apic_id) {
3930 reg_00.bits.ID = apic_id;
3932 spin_lock_irqsave(&ioapic_lock, flags);
3933 io_apic_write(ioapic, 0, reg_00.raw);
3934 reg_00.raw = io_apic_read(ioapic, 0);
3935 spin_unlock_irqrestore(&ioapic_lock, flags);
3938 if (reg_00.bits.ID != apic_id) {
3939 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3944 apic_printk(APIC_VERBOSE, KERN_INFO
3945 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3950 int __init io_apic_get_version(int ioapic)
3952 union IO_APIC_reg_01 reg_01;
3953 unsigned long flags;
3955 spin_lock_irqsave(&ioapic_lock, flags);
3956 reg_01.raw = io_apic_read(ioapic, 1);
3957 spin_unlock_irqrestore(&ioapic_lock, flags);
3959 return reg_01.bits.version;
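/*
 * Entry point used by the ACPI interrupt-routing code: allocate the
 * descriptor for the irq, record the (ioapic, pin) it is wired to (the
 * legacy ISA irqs are already in the map), and program the redirection
 * entry with the trigger mode and polarity ACPI reported.
 */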
3963 int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
3965 struct irq_desc *desc;
3966 struct irq_cfg *cfg;
3967 int cpu = boot_cpu_id;
3969 if (!IO_APIC_IRQ(irq)) {
3970 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3975 desc = irq_to_desc_alloc_cpu(irq, cpu);
3977 printk(KERN_INFO "can not get irq_desc %d\n", irq);
3982 * IRQs < 16 are already in the irq_2_pin[] map
3984 if (irq >= NR_IRQS_LEGACY) {
3985 cfg = desc->chip_data;
3986 add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
3989 setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
3995 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3999 if (skip_ioapic_setup)
4002 for (i = 0; i < mp_irq_entries; i++)
4003 if (mp_irqs[i].irqtype == mp_INT &&
4004 mp_irqs[i].srcbusirq == bus_irq)
4006 if (i >= mp_irq_entries)
4009 *trigger = irq_trigger(i);
4010 *polarity = irq_polarity(i);
4014 #endif /* CONFIG_ACPI */
4017 * This function currently is only a helper for the i386 smp boot process where
4018 * we need to reprogram the ioredtbls to cater for the cpus which have come online,
4019 * so the mask in all cases should simply be apic->target_cpus().
4022 void __init setup_ioapic_dest(void)
4024 int pin, ioapic, irq, irq_entry;
4025 struct irq_desc *desc;
4026 struct irq_cfg *cfg;
4027 const struct cpumask *mask;
4029 if (skip_ioapic_setup == 1)
4032 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
4033 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
4034 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
4035 if (irq_entry == -1)
4037 irq = pin_2_irq(irq_entry, ioapic, pin);
4039 /* setup_IO_APIC_irqs could fail to get vector for some device
4040 * when you have too many devices, because at that time only the boot cpu is online.
4043 desc = irq_to_desc(irq);
4044 cfg = desc->chip_data;
4046 setup_IO_APIC_irq(ioapic, pin, irq, desc,
4047 irq_trigger(irq_entry),
4048 irq_polarity(irq_entry));
4054 * Honour affinities which have been set in early boot
4057 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4058 mask = desc->affinity;
4060 mask = apic->target_cpus();
4062 #ifdef CONFIG_INTR_REMAP
4063 if (intr_remapping_enabled)
4064 set_ir_ioapic_affinity_irq_desc(desc, mask);
4067 set_ioapic_affinity_irq_desc(desc, mask);
4074 #define IOAPIC_RESOURCE_NAME_SIZE 11
4076 static struct resource *ioapic_resources;
4078 static struct resource * __init ioapic_setup_resources(void)
4081 struct resource *res;
4085 if (nr_ioapics <= 0)
4088 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
4091 mem = alloc_bootmem(n);
4095 mem += sizeof(struct resource) * nr_ioapics;
4097 for (i = 0; i < nr_ioapics; i++) {
4099 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
4100 sprintf(mem, "IOAPIC %u", i);
4101 mem += IOAPIC_RESOURCE_NAME_SIZE;
4105 ioapic_resources = res;
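/*
 * Map each IO-APIC's register window into the fixmap. The physical address
 * comes from the MP table when one was found; a bogus zero address on
 * 32-bit disables IO-APIC support and falls back to mapping a freshly
 * allocated page so the fixmap slot still points at valid memory.
 */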
4110 void __init ioapic_init_mappings(void)
4112 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
4113 struct resource *ioapic_res;
4116 ioapic_res = ioapic_setup_resources();
4117 for (i = 0; i < nr_ioapics; i++) {
4118 if (smp_found_config) {
4119 ioapic_phys = mp_ioapics[i].apicaddr;
4120 #ifdef CONFIG_X86_32
4123 "WARNING: bogus zero IO-APIC "
4124 "address found in MPTABLE, "
4125 "disabling IO/APIC support!\n");
4126 smp_found_config = 0;
4127 skip_ioapic_setup = 1;
4128 goto fake_ioapic_page;
4132 #ifdef CONFIG_X86_32
4135 ioapic_phys = (unsigned long)
4136 alloc_bootmem_pages(PAGE_SIZE);
4137 ioapic_phys = __pa(ioapic_phys);
4139 set_fixmap_nocache(idx, ioapic_phys);
4140 apic_printk(APIC_VERBOSE,
4141 "mapped IOAPIC to %08lx (%08lx)\n",
4142 __fix_to_virt(idx), ioapic_phys);
4145 if (ioapic_res != NULL) {
4146 ioapic_res->start = ioapic_phys;
4147 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
4153 static int __init ioapic_insert_resources(void)
4156 struct resource *r = ioapic_resources;
4160 "IO APIC resources could be not be allocated.\n");
4164 for (i = 0; i < nr_ioapics; i++) {
4165 insert_resource(&iomem_resource, r);
4172 /* Insert the IO APIC resources after PCI initialization has occurred to handle
4173 * IO APICs that are mapped in on a BAR in PCI space. */
4174 late_initcall(ioapic_insert_resources);