2 * Intel IO-APIC support for multi-Pentium hosts.
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/compiler.h>
31 #include <linux/acpi.h>
32 #include <linux/module.h>
33 #include <linux/sysdev.h>
34 #include <linux/msi.h>
35 #include <linux/htirq.h>
36 #include <linux/freezer.h>
37 #include <linux/kthread.h>
38 #include <linux/jiffies.h> /* time_after() */
40 #include <acpi/acpi_bus.h>
42 #include <linux/bootmem.h>
43 #include <linux/dmar.h>
44 #include <linux/hpet.h>
50 #include <asm/proto.h>
53 #include <asm/timer.h>
54 #include <asm/i8259.h>
56 #include <asm/msidef.h>
57 #include <asm/hypertransport.h>
58 #include <asm/setup.h>
59 #include <asm/irq_remapping.h>
61 #include <asm/uv/uv_hub.h>
62 #include <asm/uv/uv_irq.h>
65 #include <mach_apic.h>
66 #include <mach_apicdef.h>
68 #define __apicdebuginit(type) static type __init
 * Is the SiS APIC rmw bug present?
72 * -1 = don't know, 0 = no, 1 = yes
74 int sis_apic_bug = -1;
76 static DEFINE_SPINLOCK(ioapic_lock);
77 static DEFINE_SPINLOCK(vector_lock);
80 * # of IRQ routing registers
82 int nr_ioapic_registers[MAX_IO_APICS];
84 /* I/O APIC entries */
85 struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
88 /* MP IRQ source entries */
89 struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
91 /* # of MP IRQ source entries */
94 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
95 int mp_bus_id_to_type[MAX_MP_BUSSES];
98 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
100 int skip_ioapic_setup;
102 static int __init parse_noapic(char *str)
104 /* disable IO-APIC */
105 disable_ioapic_setup();
108 early_param("noapic", parse_noapic);
113 * This is performance-critical, we want to do it O(1)
115 * the indexing order of this array favors 1:1 mappings
116 * between pins and IRQs.
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};
124 static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
126 struct irq_pin_list *pin;
129 node = cpu_to_node(cpu);
131 pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
132 printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node);
struct irq_cfg {
	struct irq_pin_list *irq_2_pin;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};
146 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
147 #ifdef CONFIG_SPARSE_IRQ
148 static struct irq_cfg irq_cfgx[] = {
150 static struct irq_cfg irq_cfgx[NR_IRQS] = {
152 [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
153 [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
154 [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
155 [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
156 [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
157 [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
158 [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
159 [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
160 [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
161 [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
162 [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
163 [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
164 [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
165 [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
166 [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
167 [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
170 void __init arch_early_irq_init(void)
173 struct irq_desc *desc;
178 count = ARRAY_SIZE(irq_cfgx);
180 for (i = 0; i < count; i++) {
181 desc = irq_to_desc(i);
182 desc->chip_data = &cfg[i];
186 #ifdef CONFIG_SPARSE_IRQ
187 static struct irq_cfg *irq_cfg(unsigned int irq)
189 struct irq_cfg *cfg = NULL;
190 struct irq_desc *desc;
192 desc = irq_to_desc(irq);
194 cfg = desc->chip_data;
199 static struct irq_cfg *get_one_free_irq_cfg(int cpu)
204 node = cpu_to_node(cpu);
206 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
207 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
212 void arch_init_chip_data(struct irq_desc *desc, int cpu)
216 cfg = desc->chip_data;
218 desc->chip_data = get_one_free_irq_cfg(cpu);
219 if (!desc->chip_data) {
220 printk(KERN_ERR "can not alloc irq_cfg\n");
227 static struct irq_cfg *irq_cfg(unsigned int irq)
229 return irq < nr_irqs ? irq_cfgx + irq : NULL;
235 set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
241 unsigned int unused[3];
245 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
247 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
248 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
251 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
253 struct io_apic __iomem *io_apic = io_apic_base(apic);
254 writel(reg, &io_apic->index);
255 return readl(&io_apic->data);
258 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
260 struct io_apic __iomem *io_apic = io_apic_base(apic);
261 writel(reg, &io_apic->index);
262 writel(value, &io_apic->data);
266 * Re-write a value: to be used for read-modify-write
267 * cycles where the read already set up the index register.
 * Older SiS APICs require that we rewrite the index register
271 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
273 struct io_apic __iomem *io_apic = io_apic_base(apic);
276 writel(reg, &io_apic->index);
277 writel(value, &io_apic->data);
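
/*
 * A minimal sketch (an illustration, not necessarily the exact in-tree
 * variant) of how the SiS erratum above can be handled: when sis_apic_bug
 * is set, re-write the index register before every data write so the
 * read-modify-write cycle hits the intended redirection register.
 */
static inline void io_apic_modify_sis(unsigned int apic, unsigned int reg,
				      unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)		/* buggy SiS chipset: re-set the index */
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}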
280 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
282 struct irq_pin_list *entry;
285 spin_lock_irqsave(&ioapic_lock, flags);
286 entry = cfg->irq_2_pin;
294 reg = io_apic_read(entry->apic, 0x10 + pin*2);
295 /* Is the remote IRR bit set? */
296 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
297 spin_unlock_irqrestore(&ioapic_lock, flags);
304 spin_unlock_irqrestore(&ioapic_lock, flags);
310 struct { u32 w1, w2; };
311 struct IO_APIC_route_entry entry;
314 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
316 union entry_union eu;
318 spin_lock_irqsave(&ioapic_lock, flags);
319 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
320 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
321 spin_unlock_irqrestore(&ioapic_lock, flags);
326 * When we write a new IO APIC routing entry, we need to write the high
327 * word first! If the mask bit in the low word is clear, we will enable
328 * the interrupt, and we need to make sure the entry is fully populated
329 * before that happens.
332 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
334 union entry_union eu;
336 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
337 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
340 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
343 spin_lock_irqsave(&ioapic_lock, flags);
344 __ioapic_write_entry(apic, pin, e);
345 spin_unlock_irqrestore(&ioapic_lock, flags);
349 * When we mask an IO APIC routing entry, we need to write the low
350 * word first, in order to set the mask bit before we change the
353 static void ioapic_mask_entry(int apic, int pin)
356 union entry_union eu = { .entry.mask = 1 };
358 spin_lock_irqsave(&ioapic_lock, flags);
359 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
360 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
361 spin_unlock_irqrestore(&ioapic_lock, flags);
365 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
368 struct irq_pin_list *entry;
369 u8 vector = cfg->vector;
371 entry = cfg->irq_2_pin;
380 #ifdef CONFIG_INTR_REMAP
382 * With interrupt-remapping, destination information comes
383 * from interrupt-remapping table entry.
385 if (!irq_remapped(irq))
386 io_apic_write(apic, 0x11 + pin*2, dest);
388 io_apic_write(apic, 0x11 + pin*2, dest);
390 reg = io_apic_read(apic, 0x10 + pin*2);
391 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
393 io_apic_modify(apic, 0x10 + pin*2, reg);
401 assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
403 static void set_ioapic_affinity_irq_desc(struct irq_desc *desc,
404 const struct cpumask *mask)
412 if (!cpumask_intersects(mask, cpu_online_mask))
416 cfg = desc->chip_data;
417 if (assign_irq_vector(irq, cfg, mask))
420 set_extra_move_desc(desc, mask);
422 cpumask_and(&tmp, &cfg->domain, mask);
423 dest = cpu_mask_to_apicid(&tmp);
425 * Only the high 8 bits are valid.
427 dest = SET_APIC_LOGICAL_ID(dest);
429 spin_lock_irqsave(&ioapic_lock, flags);
430 __target_IO_APIC_irq(irq, dest, cfg);
431 cpumask_copy(&desc->affinity, mask);
432 spin_unlock_irqrestore(&ioapic_lock, flags);
435 static void set_ioapic_affinity_irq(unsigned int irq,
436 const struct cpumask *mask)
438 struct irq_desc *desc;
440 desc = irq_to_desc(irq);
442 set_ioapic_affinity_irq_desc(desc, mask);
444 #endif /* CONFIG_SMP */
447 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
448 * shared ISA-space IRQs, so we have to support them. We are super
449 * fast in the common case, and fast for shared ISA-space IRQs.
451 static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
453 struct irq_pin_list *entry;
455 entry = cfg->irq_2_pin;
457 entry = get_one_free_irq_2_pin(cpu);
459 printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
463 cfg->irq_2_pin = entry;
469 while (entry->next) {
470 /* not again, please */
471 if (entry->apic == apic && entry->pin == pin)
477 entry->next = get_one_free_irq_2_pin(cpu);
484 * Reroute an IRQ to a different pin.
486 static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
487 int oldapic, int oldpin,
488 int newapic, int newpin)
490 struct irq_pin_list *entry = cfg->irq_2_pin;
494 if (entry->apic == oldapic && entry->pin == oldpin) {
495 entry->apic = newapic;
498 /* every one is different, right? */
504 /* why? call replace before add? */
506 add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
509 static inline void io_apic_modify_irq(struct irq_cfg *cfg,
510 int mask_and, int mask_or,
511 void (*final)(struct irq_pin_list *entry))
514 struct irq_pin_list *entry;
516 for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
519 reg = io_apic_read(entry->apic, 0x10 + pin * 2);
522 io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
528 static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
530 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
534 void io_apic_sync(struct irq_pin_list *entry)
537 * Synchronize the IO-APIC and the CPU by doing
538 * a dummy read from the IO-APIC
540 struct io_apic __iomem *io_apic;
541 io_apic = io_apic_base(entry->apic);
542 readl(&io_apic->data);
545 static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
547 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
549 #else /* CONFIG_X86_32 */
550 static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
552 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
555 static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
557 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
558 IO_APIC_REDIR_MASKED, NULL);
561 static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
563 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
564 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
566 #endif /* CONFIG_X86_32 */
568 static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
570 struct irq_cfg *cfg = desc->chip_data;
575 spin_lock_irqsave(&ioapic_lock, flags);
576 __mask_IO_APIC_irq(cfg);
577 spin_unlock_irqrestore(&ioapic_lock, flags);
580 static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
582 struct irq_cfg *cfg = desc->chip_data;
585 spin_lock_irqsave(&ioapic_lock, flags);
586 __unmask_IO_APIC_irq(cfg);
587 spin_unlock_irqrestore(&ioapic_lock, flags);
590 static void mask_IO_APIC_irq(unsigned int irq)
592 struct irq_desc *desc = irq_to_desc(irq);
594 mask_IO_APIC_irq_desc(desc);
596 static void unmask_IO_APIC_irq(unsigned int irq)
598 struct irq_desc *desc = irq_to_desc(irq);
600 unmask_IO_APIC_irq_desc(desc);
603 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
605 struct IO_APIC_route_entry entry;
607 /* Check delivery_mode to be sure we're not clearing an SMI pin */
608 entry = ioapic_read_entry(apic, pin);
609 if (entry.delivery_mode == dest_SMI)
612 * Disable it in the IO-APIC irq-routing table:
614 ioapic_mask_entry(apic, pin);
static void clear_IO_APIC(void)
621 for (apic = 0; apic < nr_ioapics; apic++)
622 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
623 clear_IO_APIC_pin(apic, pin);
626 #if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
627 void send_IPI_self(int vector)
634 apic_wait_icr_idle();
635 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
637 * Send the IPI. The write to APIC_ICR fires this off.
639 apic_write(APIC_ICR, cfg);
641 #endif /* !CONFIG_SMP && CONFIG_X86_32*/
645 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
646 * specific CPU-side IRQs.
650 static int pirq_entries [MAX_PIRQS];
651 static int pirqs_enabled;
653 static int __init ioapic_pirq_setup(char *str)
656 int ints[MAX_PIRQS+1];
658 get_options(str, ARRAY_SIZE(ints), ints);
660 for (i = 0; i < MAX_PIRQS; i++)
661 pirq_entries[i] = -1;
664 apic_printk(APIC_VERBOSE, KERN_INFO
665 "PIRQ redirection, working around broken MP-BIOS.\n");
667 if (ints[0] < MAX_PIRQS)
670 for (i = 0; i < max; i++) {
671 apic_printk(APIC_VERBOSE, KERN_DEBUG
672 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
674 * PIRQs are mapped upside down, usually.
676 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
681 __setup("pirq=", ioapic_pirq_setup);
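
/*
 * Usage sketch (hypothetical values): booting with "pirq=15,11,10" fills
 * pirq_entries[] from the top down, since PIRQ lines are usually mapped
 * upside down; a value of 0 disables that PIRQ, and pin_2_irq() below
 * consults the table for IO-APIC pins 16-23.
 */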
682 #endif /* CONFIG_X86_32 */
684 #ifdef CONFIG_INTR_REMAP
685 /* I/O APIC RTE contents at the OS boot up */
686 static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
689 * Saves and masks all the unmasked IO-APIC RTE's
691 int save_mask_IO_APIC_setup(void)
693 union IO_APIC_reg_01 reg_01;
698 * The number of IO-APIC IRQ registers (== #pins):
700 for (apic = 0; apic < nr_ioapics; apic++) {
701 spin_lock_irqsave(&ioapic_lock, flags);
702 reg_01.raw = io_apic_read(apic, 1);
703 spin_unlock_irqrestore(&ioapic_lock, flags);
704 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
707 for (apic = 0; apic < nr_ioapics; apic++) {
708 early_ioapic_entries[apic] =
709 kzalloc(sizeof(struct IO_APIC_route_entry) *
710 nr_ioapic_registers[apic], GFP_KERNEL);
711 if (!early_ioapic_entries[apic])
715 for (apic = 0; apic < nr_ioapics; apic++)
716 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
717 struct IO_APIC_route_entry entry;
719 entry = early_ioapic_entries[apic][pin] =
720 ioapic_read_entry(apic, pin);
723 ioapic_write_entry(apic, pin, entry);
731 kfree(early_ioapic_entries[apic--]);
732 memset(early_ioapic_entries, 0,
733 ARRAY_SIZE(early_ioapic_entries));
738 void restore_IO_APIC_setup(void)
742 for (apic = 0; apic < nr_ioapics; apic++) {
743 if (!early_ioapic_entries[apic])
745 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
746 ioapic_write_entry(apic, pin,
747 early_ioapic_entries[apic][pin]);
748 kfree(early_ioapic_entries[apic]);
749 early_ioapic_entries[apic] = NULL;
753 void reinit_intr_remapped_IO_APIC(int intr_remapping)
 * For now, a plain restore of the previous settings.
 * TBD: when the OS enables interrupt-remapping, IO-APIC RTEs need to be
 * set up to point to interrupt-remapping table entries. For now, do a
 * plain restore, and wait for setup_IO_APIC_irqs() to do the proper
 * initialization.
762 restore_IO_APIC_setup();
767 * Find the IRQ entry number of a certain pin.
769 static int find_irq_entry(int apic, int pin, int type)
773 for (i = 0; i < mp_irq_entries; i++)
774 if (mp_irqs[i].mp_irqtype == type &&
775 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
776 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
777 mp_irqs[i].mp_dstirq == pin)
784 * Find the pin to which IRQ[irq] (ISA) is connected
786 static int __init find_isa_irq_pin(int irq, int type)
790 for (i = 0; i < mp_irq_entries; i++) {
791 int lbus = mp_irqs[i].mp_srcbus;
793 if (test_bit(lbus, mp_bus_not_pci) &&
794 (mp_irqs[i].mp_irqtype == type) &&
795 (mp_irqs[i].mp_srcbusirq == irq))
797 return mp_irqs[i].mp_dstirq;
802 static int __init find_isa_irq_apic(int irq, int type)
806 for (i = 0; i < mp_irq_entries; i++) {
807 int lbus = mp_irqs[i].mp_srcbus;
809 if (test_bit(lbus, mp_bus_not_pci) &&
810 (mp_irqs[i].mp_irqtype == type) &&
811 (mp_irqs[i].mp_srcbusirq == irq))
814 if (i < mp_irq_entries) {
for (apic = 0; apic < nr_ioapics; apic++) {
817 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
826 * Find a specific PCI IRQ entry.
827 * Not an __init, possibly needed by modules
829 static int pin_2_irq(int idx, int apic, int pin);
831 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
833 int apic, i, best_guess = -1;
835 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
837 if (test_bit(bus, mp_bus_not_pci)) {
838 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
841 for (i = 0; i < mp_irq_entries; i++) {
842 int lbus = mp_irqs[i].mp_srcbus;
844 for (apic = 0; apic < nr_ioapics; apic++)
845 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
846 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
849 if (!test_bit(lbus, mp_bus_not_pci) &&
850 !mp_irqs[i].mp_irqtype &&
852 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);
855 if (!(apic || IO_APIC_IRQ(irq)))
858 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
861 * Use the first all-but-pin matching entry as a
862 * best-guess fuzzy result for broken mptables.
871 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
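
/*
 * Usage sketch (hypothetical caller): a PCI IRQ router could translate a
 * device's bus/slot and a 0-based INTx pin into an IO-APIC IRQ number
 * like this; a return value of -1 means no matching mptable entry.
 */
static int __init example_pci_irq_lookup(struct pci_dev *dev, int pin)
{
	return IO_APIC_get_PCI_irq_vector(dev->bus->number,
					  PCI_SLOT(dev->devfn), pin);
}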
873 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
875 * EISA Edge/Level control register, ELCR
877 static int EISA_ELCR(unsigned int irq)
879 if (irq < NR_IRQS_LEGACY) {
880 unsigned int port = 0x4d0 + (irq >> 3);
881 return (inb(port) >> (irq & 7)) & 1;
883 apic_printk(APIC_VERBOSE, KERN_INFO
884 "Broken MPtable reports ISA irq %d\n", irq);
890 /* ISA interrupts are always polarity zero edge triggered,
891 * when listed as conforming in the MP table. */
893 #define default_ISA_trigger(idx) (0)
894 #define default_ISA_polarity(idx) (0)
896 /* EISA interrupts are always polarity zero and can be edge or level
897 * trigger depending on the ELCR value. If an interrupt is listed as
898 * EISA conforming in the MP table, that means its trigger type must
899 * be read in from the ELCR */
901 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
902 #define default_EISA_polarity(idx) default_ISA_polarity(idx)
904 /* PCI interrupts are always polarity one level triggered,
905 * when listed as conforming in the MP table. */
907 #define default_PCI_trigger(idx) (1)
908 #define default_PCI_polarity(idx) (1)
910 /* MCA interrupts are always polarity zero level triggered,
911 * when listed as conforming in the MP table. */
913 #define default_MCA_trigger(idx) (1)
914 #define default_MCA_polarity(idx) default_ISA_polarity(idx)
916 static int MPBIOS_polarity(int idx)
918 int bus = mp_irqs[idx].mp_srcbus;
922 * Determine IRQ line polarity (high active or low active):
924 switch (mp_irqs[idx].mp_irqflag & 3)
case 0: /* conforms, i.e. bus-type dependent polarity */
927 if (test_bit(bus, mp_bus_not_pci))
928 polarity = default_ISA_polarity(idx);
930 polarity = default_PCI_polarity(idx);
932 case 1: /* high active */
937 case 2: /* reserved */
939 printk(KERN_WARNING "broken BIOS!!\n");
943 case 3: /* low active */
948 default: /* invalid */
950 printk(KERN_WARNING "broken BIOS!!\n");
958 static int MPBIOS_trigger(int idx)
960 int bus = mp_irqs[idx].mp_srcbus;
964 * Determine IRQ trigger mode (edge or level sensitive):
switch ((mp_irqs[idx].mp_irqflag >> 2) & 3)
case 0: /* conforms, i.e. bus-type dependent */
969 if (test_bit(bus, mp_bus_not_pci))
970 trigger = default_ISA_trigger(idx);
972 trigger = default_PCI_trigger(idx);
973 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
974 switch (mp_bus_id_to_type[bus]) {
975 case MP_BUS_ISA: /* ISA pin */
977 /* set before the switch */
980 case MP_BUS_EISA: /* EISA pin */
982 trigger = default_EISA_trigger(idx);
985 case MP_BUS_PCI: /* PCI pin */
987 /* set before the switch */
990 case MP_BUS_MCA: /* MCA pin */
992 trigger = default_MCA_trigger(idx);
997 printk(KERN_WARNING "broken BIOS!!\n");
1009 case 2: /* reserved */
1011 printk(KERN_WARNING "broken BIOS!!\n");
1020 default: /* invalid */
1022 printk(KERN_WARNING "broken BIOS!!\n");
1030 static inline int irq_polarity(int idx)
1032 return MPBIOS_polarity(idx);
1035 static inline int irq_trigger(int idx)
1037 return MPBIOS_trigger(idx);
1040 int (*ioapic_renumber_irq)(int ioapic, int irq);
1041 static int pin_2_irq(int idx, int apic, int pin)
1044 int bus = mp_irqs[idx].mp_srcbus;
1047 * Debugging check, we are in big trouble if this message pops up!
1049 if (mp_irqs[idx].mp_dstirq != pin)
1050 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1052 if (test_bit(bus, mp_bus_not_pci)) {
1053 irq = mp_irqs[idx].mp_srcbusirq;
1056 * PCI IRQs are mapped in order
1060 irq += nr_ioapic_registers[i++];
1063 * For MPS mode, so far only needed by ES7000 platform
1065 if (ioapic_renumber_irq)
1066 irq = ioapic_renumber_irq(apic, irq);
1069 #ifdef CONFIG_X86_32
1071 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1073 if ((pin >= 16) && (pin <= 23)) {
1074 if (pirq_entries[pin-16] != -1) {
1075 if (!pirq_entries[pin-16]) {
1076 apic_printk(APIC_VERBOSE, KERN_DEBUG
1077 "disabling PIRQ%d\n", pin-16);
1079 irq = pirq_entries[pin-16];
1080 apic_printk(APIC_VERBOSE, KERN_DEBUG
1081 "using PIRQ%d -> IRQ %d\n",
1091 void lock_vector_lock(void)
/* Used so that the online set of cpus does not change
1094 * during assign_irq_vector.
1096 spin_lock(&vector_lock);
1099 void unlock_vector_lock(void)
1101 spin_unlock(&vector_lock);
1105 __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1108 * NOTE! The local APIC isn't very good at handling
1109 * multiple interrupts at the same interrupt level.
1110 * As the interrupt level is determined by taking the
1111 * vector number and shifting that right by 4, we
1112 * want to spread these out a bit so that they don't
1113 * all fall in the same interrupt level.
1115 * Also, we've got to be careful not to trash gate
1116 * 0x80, because int 0x80 is hm, kind of importantish. ;)
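 *
 * Worked example (illustrative arithmetic): vectors 0x31 and 0x3f share
 * priority level 3 (vector >> 4 == 3), while 0x41 lands on level 4, so
 * spacing consecutive allocations apart keeps them on distinct levels.
 */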
1118 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1119 unsigned int old_vector;
1123 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1126 old_vector = cfg->vector;
1128 cpus_and(tmp_mask, *mask, cpu_online_map);
1129 cpus_and(tmp_mask, cfg->domain, tmp_mask);
1130 if (!cpus_empty(tmp_mask))
1134 /* Only try and allocate irqs on cpus that are present */
1135 for_each_cpu_and(cpu, mask, &cpu_online_map) {
1139 vector_allocation_domain(cpu, &tmp_mask);
1141 vector = current_vector;
1142 offset = current_offset;
1145 if (vector >= first_system_vector) {
1146 /* If out of vectors on large boxen, must share them. */
1147 offset = (offset + 1) % 8;
1148 vector = FIRST_DEVICE_VECTOR + offset;
1150 if (unlikely(current_vector == vector))
1152 #ifdef CONFIG_X86_64
1153 if (vector == IA32_SYSCALL_VECTOR)
1156 if (vector == SYSCALL_VECTOR)
1159 for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map)
1160 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1163 current_vector = vector;
1164 current_offset = offset;
1166 cfg->move_in_progress = 1;
1167 cfg->old_domain = cfg->domain;
1169 for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map)
1170 per_cpu(vector_irq, new_cpu)[vector] = irq;
1171 cfg->vector = vector;
1172 cfg->domain = tmp_mask;
1179 assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1182 unsigned long flags;
1184 spin_lock_irqsave(&vector_lock, flags);
1185 err = __assign_irq_vector(irq, cfg, mask);
1186 spin_unlock_irqrestore(&vector_lock, flags);
1190 static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1195 BUG_ON(!cfg->vector);
1197 vector = cfg->vector;
1198 cpus_and(mask, cfg->domain, cpu_online_map);
1199 for_each_cpu_mask_nr(cpu, mask)
1200 per_cpu(vector_irq, cpu)[vector] = -1;
1203 cpus_clear(cfg->domain);
1205 if (likely(!cfg->move_in_progress))
1207 cpus_and(mask, cfg->old_domain, cpu_online_map);
1208 for_each_cpu_mask_nr(cpu, mask) {
1209 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1211 if (per_cpu(vector_irq, cpu)[vector] != irq)
1213 per_cpu(vector_irq, cpu)[vector] = -1;
1217 cfg->move_in_progress = 0;
1220 void __setup_vector_irq(int cpu)
1222 /* Initialize vector_irq on a new cpu */
1223 /* This function must be called with vector_lock held */
1225 struct irq_cfg *cfg;
1226 struct irq_desc *desc;
1228 /* Mark the inuse vectors */
1229 for_each_irq_desc(irq, desc) {
1232 cfg = desc->chip_data;
1233 if (!cpu_isset(cpu, cfg->domain))
1235 vector = cfg->vector;
1236 per_cpu(vector_irq, cpu)[vector] = irq;
1238 /* Mark the free vectors */
1239 for (vector = 0; vector < NR_VECTORS; ++vector) {
1240 irq = per_cpu(vector_irq, cpu)[vector];
1245 if (!cpu_isset(cpu, cfg->domain))
1246 per_cpu(vector_irq, cpu)[vector] = -1;
1250 static struct irq_chip ioapic_chip;
1251 #ifdef CONFIG_INTR_REMAP
1252 static struct irq_chip ir_ioapic_chip;
1255 #define IOAPIC_AUTO -1
1256 #define IOAPIC_EDGE 0
1257 #define IOAPIC_LEVEL 1
1259 #ifdef CONFIG_X86_32
1260 static inline int IO_APIC_irq_trigger(int irq)
1264 for (apic = 0; apic < nr_ioapics; apic++) {
1265 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1266 idx = find_irq_entry(apic, pin, mp_INT);
1267 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1268 return irq_trigger(idx);
1272 * nonexistent IRQs are edge default
1277 static inline int IO_APIC_irq_trigger(int irq)
1283 static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
1286 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1287 trigger == IOAPIC_LEVEL)
1288 desc->status |= IRQ_LEVEL;
1290 desc->status &= ~IRQ_LEVEL;
1292 #ifdef CONFIG_INTR_REMAP
1293 if (irq_remapped(irq)) {
1294 desc->status |= IRQ_MOVE_PCNTXT;
1296 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1300 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1301 handle_edge_irq, "edge");
1305 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1306 trigger == IOAPIC_LEVEL)
1307 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1311 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1312 handle_edge_irq, "edge");
1315 static int setup_ioapic_entry(int apic, int irq,
1316 struct IO_APIC_route_entry *entry,
1317 unsigned int destination, int trigger,
1318 int polarity, int vector)
1321 * add it to the IO-APIC irq-routing table:
memset(entry, 0, sizeof(*entry));
1325 #ifdef CONFIG_INTR_REMAP
1326 if (intr_remapping_enabled) {
1327 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1329 struct IR_IO_APIC_route_entry *ir_entry =
1330 (struct IR_IO_APIC_route_entry *) entry;
1334 panic("No mapping iommu for ioapic %d\n", apic);
1336 index = alloc_irte(iommu, irq, 1);
1338 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1340 memset(&irte, 0, sizeof(irte));
1343 irte.dst_mode = INT_DEST_MODE;
1344 irte.trigger_mode = trigger;
1345 irte.dlvry_mode = INT_DELIVERY_MODE;
1346 irte.vector = vector;
1347 irte.dest_id = IRTE_DEST(destination);
1349 modify_irte(irq, &irte);
1351 ir_entry->index2 = (index >> 15) & 0x1;
1353 ir_entry->format = 1;
1354 ir_entry->index = (index & 0x7fff);
1358 entry->delivery_mode = INT_DELIVERY_MODE;
1359 entry->dest_mode = INT_DEST_MODE;
1360 entry->dest = destination;
1363 entry->mask = 0; /* enable IRQ */
1364 entry->trigger = trigger;
1365 entry->polarity = polarity;
1366 entry->vector = vector;
1368 /* Mask level triggered irqs.
1369 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1376 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
1377 int trigger, int polarity)
1379 struct irq_cfg *cfg;
1380 struct IO_APIC_route_entry entry;
1383 if (!IO_APIC_IRQ(irq))
1386 cfg = desc->chip_data;
1388 mask = *TARGET_CPUS;
1389 if (assign_irq_vector(irq, cfg, &mask))
1392 cpus_and(mask, cfg->domain, mask);
apic_printk(APIC_VERBOSE, KERN_DEBUG
1395 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1396 "IRQ %d Mode:%i Active:%i)\n",
1397 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1398 irq, trigger, polarity);
1401 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1402 cpu_mask_to_apicid(&mask), trigger, polarity,
1404 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1405 mp_ioapics[apic].mp_apicid, pin);
1406 __clear_irq_vector(irq, cfg);
1410 ioapic_register_intr(irq, desc, trigger);
1411 if (irq < NR_IRQS_LEGACY)
1412 disable_8259A_irq(irq);
1414 ioapic_write_entry(apic, pin, entry);
1417 static void __init setup_IO_APIC_irqs(void)
1419 int apic, pin, idx, irq;
1421 struct irq_desc *desc;
1422 struct irq_cfg *cfg;
1423 int cpu = boot_cpu_id;
1425 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1427 for (apic = 0; apic < nr_ioapics; apic++) {
1428 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1430 idx = find_irq_entry(apic, pin, mp_INT);
1434 apic_printk(APIC_VERBOSE,
1435 KERN_DEBUG " %d-%d",
1436 mp_ioapics[apic].mp_apicid,
1439 apic_printk(APIC_VERBOSE, " %d-%d",
1440 mp_ioapics[apic].mp_apicid,
1445 apic_printk(APIC_VERBOSE,
1446 " (apicid-pin) not connected\n");
1450 irq = pin_2_irq(idx, apic, pin);
1451 #ifdef CONFIG_X86_32
1452 if (multi_timer_check(apic, irq))
1455 desc = irq_to_desc_alloc_cpu(irq, cpu);
1457 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1460 cfg = desc->chip_data;
1461 add_pin_to_irq_cpu(cfg, cpu, apic, pin);
1463 setup_IO_APIC_irq(apic, pin, irq, desc,
1464 irq_trigger(idx), irq_polarity(idx));
1469 apic_printk(APIC_VERBOSE,
1470 " (apicid-pin) not connected\n");
1474 * Set up the timer pin, possibly with the 8259A-master behind.
1476 static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1479 struct IO_APIC_route_entry entry;
1481 #ifdef CONFIG_INTR_REMAP
1482 if (intr_remapping_enabled)
1486 memset(&entry, 0, sizeof(entry));
1489 * We use logical delivery to get the timer IRQ
1492 entry.dest_mode = INT_DEST_MODE;
1493 entry.mask = 1; /* mask IRQ now */
1494 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1495 entry.delivery_mode = INT_DELIVERY_MODE;
1498 entry.vector = vector;
1501 * The timer IRQ doesn't have to know that behind the
 * scenes we may have an 8259A-master in AEOI mode ...
1504 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1507 * Add it to the IO-APIC irq-routing table:
1509 ioapic_write_entry(apic, pin, entry);
1513 __apicdebuginit(void) print_IO_APIC(void)
1516 union IO_APIC_reg_00 reg_00;
1517 union IO_APIC_reg_01 reg_01;
1518 union IO_APIC_reg_02 reg_02;
1519 union IO_APIC_reg_03 reg_03;
1520 unsigned long flags;
1521 struct irq_cfg *cfg;
1522 struct irq_desc *desc;
1525 if (apic_verbosity == APIC_QUIET)
1528 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1529 for (i = 0; i < nr_ioapics; i++)
1530 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1531 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1534 * We are a bit conservative about what we expect. We have to
1535 * know about every hardware change ASAP.
1537 printk(KERN_INFO "testing the IO APIC.......................\n");
1539 for (apic = 0; apic < nr_ioapics; apic++) {
1541 spin_lock_irqsave(&ioapic_lock, flags);
1542 reg_00.raw = io_apic_read(apic, 0);
1543 reg_01.raw = io_apic_read(apic, 1);
1544 if (reg_01.bits.version >= 0x10)
1545 reg_02.raw = io_apic_read(apic, 2);
1546 if (reg_01.bits.version >= 0x20)
1547 reg_03.raw = io_apic_read(apic, 3);
1548 spin_unlock_irqrestore(&ioapic_lock, flags);
1551 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1552 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1553 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1554 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1555 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1558 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1560 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1561 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1564 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1565 * but the value of reg_02 is read as the previous read register
1566 * value, so ignore it if reg_02 == reg_01.
1568 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1569 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1570 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1574 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1575 * or reg_03, but the value of reg_0[23] is read as the previous read
1576 * register value, so ignore it if reg_03 == reg_0[12].
1578 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1579 reg_03.raw != reg_01.raw) {
1580 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1581 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1584 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1586 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1587 " Stat Dmod Deli Vect: \n");
1589 for (i = 0; i <= reg_01.bits.entries; i++) {
1590 struct IO_APIC_route_entry entry;
1592 entry = ioapic_read_entry(apic, i);
1594 printk(KERN_DEBUG " %02x %03X ",
1599 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1604 entry.delivery_status,
1606 entry.delivery_mode,
1611 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1612 for_each_irq_desc(irq, desc) {
1613 struct irq_pin_list *entry;
1617 cfg = desc->chip_data;
1618 entry = cfg->irq_2_pin;
1621 printk(KERN_DEBUG "IRQ%d ", irq);
1623 printk("-> %d:%d", entry->apic, entry->pin);
1626 entry = entry->next;
1631 printk(KERN_INFO ".................................... done.\n");
1636 __apicdebuginit(void) print_APIC_bitfield(int base)
1641 if (apic_verbosity == APIC_QUIET)
1644 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1645 for (i = 0; i < 8; i++) {
1646 v = apic_read(base + i*0x10);
1647 for (j = 0; j < 32; j++) {
1657 __apicdebuginit(void) print_local_APIC(void *dummy)
1659 unsigned int v, ver, maxlvt;
1662 if (apic_verbosity == APIC_QUIET)
1665 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1666 smp_processor_id(), hard_smp_processor_id());
1667 v = apic_read(APIC_ID);
1668 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1669 v = apic_read(APIC_LVR);
1670 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1671 ver = GET_APIC_VERSION(v);
1672 maxlvt = lapic_get_maxlvt();
1674 v = apic_read(APIC_TASKPRI);
1675 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1677 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1678 if (!APIC_XAPIC(ver)) {
1679 v = apic_read(APIC_ARBPRI);
1680 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1681 v & APIC_ARBPRI_MASK);
1683 v = apic_read(APIC_PROCPRI);
1684 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1688 * Remote read supported only in the 82489DX and local APIC for
1689 * Pentium processors.
1691 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
1692 v = apic_read(APIC_RRR);
1693 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1696 v = apic_read(APIC_LDR);
1697 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1698 if (!x2apic_enabled()) {
1699 v = apic_read(APIC_DFR);
1700 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1702 v = apic_read(APIC_SPIV);
1703 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1705 printk(KERN_DEBUG "... APIC ISR field:\n");
1706 print_APIC_bitfield(APIC_ISR);
1707 printk(KERN_DEBUG "... APIC TMR field:\n");
1708 print_APIC_bitfield(APIC_TMR);
1709 printk(KERN_DEBUG "... APIC IRR field:\n");
1710 print_APIC_bitfield(APIC_IRR);
1712 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1713 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1714 apic_write(APIC_ESR, 0);
1716 v = apic_read(APIC_ESR);
1717 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1720 icr = apic_icr_read();
1721 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1722 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1724 v = apic_read(APIC_LVTT);
1725 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1727 if (maxlvt > 3) { /* PC is LVT#4. */
1728 v = apic_read(APIC_LVTPC);
1729 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1731 v = apic_read(APIC_LVT0);
1732 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1733 v = apic_read(APIC_LVT1);
1734 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1736 if (maxlvt > 2) { /* ERR is LVT#3. */
1737 v = apic_read(APIC_LVTERR);
1738 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1741 v = apic_read(APIC_TMICT);
1742 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1743 v = apic_read(APIC_TMCCT);
1744 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1745 v = apic_read(APIC_TDCR);
1746 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1750 __apicdebuginit(void) print_all_local_APICs(void)
1755 for_each_online_cpu(cpu)
1756 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1760 __apicdebuginit(void) print_PIC(void)
1763 unsigned long flags;
1765 if (apic_verbosity == APIC_QUIET)
1768 printk(KERN_DEBUG "\nprinting PIC contents\n");
1770 spin_lock_irqsave(&i8259A_lock, flags);
1772 v = inb(0xa1) << 8 | inb(0x21);
1773 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1775 v = inb(0xa0) << 8 | inb(0x20);
1776 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1780 v = inb(0xa0) << 8 | inb(0x20);
1784 spin_unlock_irqrestore(&i8259A_lock, flags);
1786 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1788 v = inb(0x4d1) << 8 | inb(0x4d0);
1789 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1792 __apicdebuginit(int) print_all_ICs(void)
1795 print_all_local_APICs();
1801 fs_initcall(print_all_ICs);
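
/*
 * Note: the dumps above are suppressed while apic_verbosity is APIC_QUIET;
 * raising it on the command line (e.g. "apic=verbose" or "apic=debug")
 * keeps them in the boot log.
 */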
/* Where, if anywhere, the i8259 is connected in ExtINT mode */
1805 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1807 void __init enable_IO_APIC(void)
1809 union IO_APIC_reg_01 reg_01;
1810 int i8259_apic, i8259_pin;
1812 unsigned long flags;
1814 #ifdef CONFIG_X86_32
1817 for (i = 0; i < MAX_PIRQS; i++)
1818 pirq_entries[i] = -1;
1822 * The number of IO-APIC IRQ registers (== #pins):
1824 for (apic = 0; apic < nr_ioapics; apic++) {
1825 spin_lock_irqsave(&ioapic_lock, flags);
1826 reg_01.raw = io_apic_read(apic, 1);
1827 spin_unlock_irqrestore(&ioapic_lock, flags);
1828 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
for (apic = 0; apic < nr_ioapics; apic++) {
1832 /* See if any of the pins is in ExtINT mode */
1833 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1834 struct IO_APIC_route_entry entry;
1835 entry = ioapic_read_entry(apic, pin);
1837 /* If the interrupt line is enabled and in ExtInt mode
1838 * I have found the pin where the i8259 is connected.
1840 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1841 ioapic_i8259.apic = apic;
1842 ioapic_i8259.pin = pin;
/* Look to see if the MP table has reported the ExtINT */
/* If we could not find the appropriate pin by looking at the ioapic,
 * the i8259 probably is not connected to the ioapic, but give the
1851 * mptable a chance anyway.
1853 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1854 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1855 /* Trust the MP table if nothing is setup in the hardware */
1856 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1857 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1858 ioapic_i8259.pin = i8259_pin;
1859 ioapic_i8259.apic = i8259_apic;
1861 /* Complain if the MP table and the hardware disagree */
1862 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1863 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1865 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1869 * Do not trust the IO-APIC being empty at bootup
1875 * Not an __init, needed by the reboot code
1877 void disable_IO_APIC(void)
1880 * Clear the IO-APIC before rebooting:
1885 * If the i8259 is routed through an IOAPIC
1886 * Put that IOAPIC in virtual wire mode
1887 * so legacy interrupts can be delivered.
1889 if (ioapic_i8259.pin != -1) {
1890 struct IO_APIC_route_entry entry;
1892 memset(&entry, 0, sizeof(entry));
1893 entry.mask = 0; /* Enabled */
1894 entry.trigger = 0; /* Edge */
1896 entry.polarity = 0; /* High */
1897 entry.delivery_status = 0;
1898 entry.dest_mode = 0; /* Physical */
1899 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1901 entry.dest = read_apic_id();
1904 * Add it to the IO-APIC irq-routing table:
1906 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1909 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1912 #ifdef CONFIG_X86_32
1914 * function to set the IO-APIC physical IDs based on the
1915 * values stored in the MPC table.
1917 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1920 static void __init setup_ioapic_ids_from_mpc(void)
1922 union IO_APIC_reg_00 reg_00;
1923 physid_mask_t phys_id_present_map;
1926 unsigned char old_id;
1927 unsigned long flags;
1929 if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
1933 * Don't check I/O APIC IDs for xAPIC systems. They have
1934 * no meaning without the serial APIC bus.
1936 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1937 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1940 * This is broken; anything with a real cpu count has to
1941 * circumvent this idiocy regardless.
1943 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
1946 * Set the IOAPIC ID to the value stored in the MPC table.
1948 for (apic = 0; apic < nr_ioapics; apic++) {
1950 /* Read the register 0 value */
1951 spin_lock_irqsave(&ioapic_lock, flags);
1952 reg_00.raw = io_apic_read(apic, 0);
1953 spin_unlock_irqrestore(&ioapic_lock, flags);
1955 old_id = mp_ioapics[apic].mp_apicid;
1957 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
1958 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1959 apic, mp_ioapics[apic].mp_apicid);
1960 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1962 mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
1966 * Sanity check, is the ID really free? Every APIC in a
1967 * system must have a unique ID or we get lots of nice
1968 * 'stuck on smp_invalidate_needed IPI wait' messages.
1970 if (check_apicid_used(phys_id_present_map,
1971 mp_ioapics[apic].mp_apicid)) {
1972 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1973 apic, mp_ioapics[apic].mp_apicid);
1974 for (i = 0; i < get_physical_broadcast(); i++)
1975 if (!physid_isset(i, phys_id_present_map))
1977 if (i >= get_physical_broadcast())
1978 panic("Max APIC ID exceeded!\n");
1979 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1981 physid_set(i, phys_id_present_map);
1982 mp_ioapics[apic].mp_apicid = i;
1985 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
1986 apic_printk(APIC_VERBOSE, "Setting %d in the "
1987 "phys_id_present_map\n",
1988 mp_ioapics[apic].mp_apicid);
1989 physids_or(phys_id_present_map, phys_id_present_map, tmp);
1994 * We need to adjust the IRQ routing table
1995 * if the ID changed.
1997 if (old_id != mp_ioapics[apic].mp_apicid)
1998 for (i = 0; i < mp_irq_entries; i++)
1999 if (mp_irqs[i].mp_dstapic == old_id)
2000 mp_irqs[i].mp_dstapic
2001 = mp_ioapics[apic].mp_apicid;
2004 * Read the right value from the MPC table and
2005 * write it into the ID register.
2007 apic_printk(APIC_VERBOSE, KERN_INFO
2008 "...changing IO-APIC physical APIC ID to %d ...",
2009 mp_ioapics[apic].mp_apicid);
2011 reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
2012 spin_lock_irqsave(&ioapic_lock, flags);
2013 io_apic_write(apic, 0, reg_00.raw);
2014 spin_unlock_irqrestore(&ioapic_lock, flags);
2019 spin_lock_irqsave(&ioapic_lock, flags);
2020 reg_00.raw = io_apic_read(apic, 0);
2021 spin_unlock_irqrestore(&ioapic_lock, flags);
2022 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
2023 printk("could not set ID!\n");
2025 apic_printk(APIC_VERBOSE, " ok.\n");
2030 int no_timer_check __initdata;
2032 static int __init notimercheck(char *s)
2037 __setup("no_timer_check", notimercheck);
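
/*
 * Note (summarizing the option above): booting with "no_timer_check" makes
 * timer_irq_works() below report success without measuring, which helps on
 * boards whose timer tick check would otherwise misfire.
 */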
 * There is a nasty bug in some older SMP boards: their mptable lies
2041 * about the timer IRQ. We do the following to work around the situation:
2043 * - timer IRQ defaults to IO-APIC IRQ
2044 * - if this function detects that timer IRQs are defunct, then we fall
2045 * back to ISA timer IRQs
2047 static int __init timer_irq_works(void)
2049 unsigned long t1 = jiffies;
2050 unsigned long flags;
2055 local_save_flags(flags);
2057 /* Let ten ticks pass... */
2058 mdelay((10 * 1000) / HZ);
2059 local_irq_restore(flags);
2062 * Expect a few ticks at least, to be sure some possible
 * glue logic does not lock up after the first one or two
2064 * ticks in a non-ExtINT mode. Also the local APIC
2065 * might have cached one ExtINT interrupt. Finally, at
2066 * least one tick may be lost due to delays.
2070 if (time_after(jiffies, t1 + 4))
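/*
 * Worked example (assuming HZ == 1000): mdelay(10) above lets roughly ten
 * ticks elapse, and the time_after() check passes once more than four
 * ticks were counted, tolerating a few lost or cached ones.
 */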
2076 * In the SMP+IOAPIC case it might happen that there are an unspecified
2077 * number of pending IRQ events unhandled. These cases are very rare,
2078 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way, as we then do not have to be aware of
2080 * 'pending' interrupts in the IRQ path, except at this point.
2083 * Edge triggered needs to resend any interrupt
2084 * that was delayed but this is now handled in the device
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * to return 1 to indicate that it was pending.
2094 * This is not complete - we should be able to fake
2095 * an edge even if it isn't on the 8259A...
2098 static unsigned int startup_ioapic_irq(unsigned int irq)
2100 int was_pending = 0;
2101 unsigned long flags;
2102 struct irq_cfg *cfg;
2104 spin_lock_irqsave(&ioapic_lock, flags);
2105 if (irq < NR_IRQS_LEGACY) {
2106 disable_8259A_irq(irq);
2107 if (i8259A_irq_pending(irq))
2111 __unmask_IO_APIC_irq(cfg);
2112 spin_unlock_irqrestore(&ioapic_lock, flags);
2117 #ifdef CONFIG_X86_64
2118 static int ioapic_retrigger_irq(unsigned int irq)
2121 struct irq_cfg *cfg = irq_cfg(irq);
2122 unsigned long flags;
2124 spin_lock_irqsave(&vector_lock, flags);
2125 send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
2126 spin_unlock_irqrestore(&vector_lock, flags);
2131 static int ioapic_retrigger_irq(unsigned int irq)
2133 send_IPI_self(irq_cfg(irq)->vector);
2140 * Level and edge triggered IO-APIC interrupts need different handling,
2141 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2142 * handled with the level-triggered descriptor, but that one has slightly
2143 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler without risking IRQ storms and other ugly
2150 #ifdef CONFIG_INTR_REMAP
2151 static void ir_irq_migration(struct work_struct *work);
2153 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2156 * Migrate the IO-APIC irq in the presence of intr-remapping.
 * For edge triggered, irq migration is a simple atomic update (of vector
 * and cpu destination) of the IRTE, plus a flush of the hardware cache.
 *
 * For level triggered, we need to modify the io-apic RTE as well, with the
 * updated vector information, along with modifying the IRTE with vector and
 * destination. So irq migration for level triggered is a little more complex
 * than edge triggered migration. But the good news is that we use the same
 * algorithm for level triggered migration as we have today, the only
 * difference being that
2166 * we now initiate the irq migration from process context instead of the
2167 * interrupt context.
 * In the future, when we do a directed EOI (combined with cpu EOI broadcast
2170 * suppression) to the IO-APIC, level triggered irq migration will also be
2171 * as simple as edge triggered migration and we can do the irq migration
2172 * with a simple atomic update to IO-APIC RTE.
2175 migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2177 struct irq_cfg *cfg;
2180 int modify_ioapic_rte;
2182 unsigned long flags;
2185 cpus_and(tmpmask, *mask, cpu_online_map);
2186 if (cpus_empty(tmpmask))
2190 if (get_irte(irq, &irte))
2193 cfg = desc->chip_data;
2194 if (assign_irq_vector(irq, cfg, mask))
2197 set_extra_move_desc(desc, mask);
2199 cpus_and(tmpmask, cfg->domain, *mask);
2200 dest = cpu_mask_to_apicid(&tmpmask);
2202 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2203 if (modify_ioapic_rte) {
2204 spin_lock_irqsave(&ioapic_lock, flags);
2205 __target_IO_APIC_irq(irq, dest, cfg);
2206 spin_unlock_irqrestore(&ioapic_lock, flags);
2209 irte.vector = cfg->vector;
2210 irte.dest_id = IRTE_DEST(dest);
 * Modify the IRTE and flush the Interrupt entry cache.
2215 modify_irte(irq, &irte);
2217 if (cfg->move_in_progress) {
2218 cpus_and(tmpmask, cfg->old_domain, cpu_online_map);
2219 cfg->move_cleanup_count = cpus_weight(tmpmask);
2220 send_IPI_mask(&tmpmask, IRQ_MOVE_CLEANUP_VECTOR);
2221 cfg->move_in_progress = 0;
2224 desc->affinity = *mask;
2227 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
2230 struct irq_cfg *cfg = desc->chip_data;
2232 mask_IO_APIC_irq_desc(desc);
2234 if (io_apic_level_ack_pending(cfg)) {
2236 * Interrupt in progress. Migrating irq now will change the
2237 * vector information in the IO-APIC RTE and that will confuse
 * the EOI broadcast performed by the cpu.
2239 * So, delay the irq migration to the next instance.
2241 schedule_delayed_work(&ir_migration_work, 1);
/* everything is clear, we have right of way */
2246 migrate_ioapic_irq_desc(desc, &desc->pending_mask);
2249 desc->status &= ~IRQ_MOVE_PENDING;
2250 cpus_clear(desc->pending_mask);
2253 unmask_IO_APIC_irq_desc(desc);
2258 static void ir_irq_migration(struct work_struct *work)
2261 struct irq_desc *desc;
2263 for_each_irq_desc(irq, desc) {
2267 if (desc->status & IRQ_MOVE_PENDING) {
2268 unsigned long flags;
2270 spin_lock_irqsave(&desc->lock, flags);
2271 if (!desc->chip->set_affinity ||
2272 !(desc->status & IRQ_MOVE_PENDING)) {
2273 desc->status &= ~IRQ_MOVE_PENDING;
2274 spin_unlock_irqrestore(&desc->lock, flags);
2278 desc->chip->set_affinity(irq, &desc->pending_mask);
2279 spin_unlock_irqrestore(&desc->lock, flags);
2285 * Migrates the IRQ destination in the process context.
2287 static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2288 const struct cpumask *mask)
2290 if (desc->status & IRQ_LEVEL) {
2291 desc->status |= IRQ_MOVE_PENDING;
2292 cpumask_copy(&desc->pending_mask, mask);
2293 migrate_irq_remapped_level_desc(desc);
2297 migrate_ioapic_irq_desc(desc, mask);
2299 static void set_ir_ioapic_affinity_irq(unsigned int irq,
2300 const struct cpumask *mask)
2302 struct irq_desc *desc = irq_to_desc(irq);
2304 set_ir_ioapic_affinity_irq_desc(desc, mask);
2308 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2310 unsigned vector, me;
2312 #ifdef CONFIG_X86_64
2317 me = smp_processor_id();
2318 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2320 struct irq_desc *desc;
2321 struct irq_cfg *cfg;
2322 irq = __get_cpu_var(vector_irq)[vector];
2327 desc = irq_to_desc(irq);
2332 spin_lock(&desc->lock);
2333 if (!cfg->move_cleanup_count)
2336 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
2339 __get_cpu_var(vector_irq)[vector] = -1;
2340 cfg->move_cleanup_count--;
2342 spin_unlock(&desc->lock);
2348 static void irq_complete_move(struct irq_desc **descp)
2350 struct irq_desc *desc = *descp;
2351 struct irq_cfg *cfg = desc->chip_data;
2352 unsigned vector, me;
2354 if (likely(!cfg->move_in_progress))
2357 vector = ~get_irq_regs()->orig_ax;
2358 me = smp_processor_id();
2359 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
2360 cpumask_t cleanup_mask;
2362 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2363 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2364 send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2365 cfg->move_in_progress = 0;
2369 static inline void irq_complete_move(struct irq_desc **descp) {}
2372 #ifdef CONFIG_INTR_REMAP
2373 static void ack_x2apic_level(unsigned int irq)
2378 static void ack_x2apic_edge(unsigned int irq)
2385 static void ack_apic_edge(unsigned int irq)
2387 struct irq_desc *desc = irq_to_desc(irq);
2389 irq_complete_move(&desc);
2390 move_native_irq(irq);
2394 atomic_t irq_mis_count;
2396 static void ack_apic_level(unsigned int irq)
2398 struct irq_desc *desc = irq_to_desc(irq);
2400 #ifdef CONFIG_X86_32
2404 struct irq_cfg *cfg;
2405 int do_unmask_irq = 0;
2407 irq_complete_move(&desc);
2408 #ifdef CONFIG_GENERIC_PENDING_IRQ
2409 /* If we are moving the irq we need to mask it */
2410 if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2412 mask_IO_APIC_irq_desc(desc);
2416 #ifdef CONFIG_X86_32
2418 * It appears there is an erratum which affects at least version 0x11
2419 * of I/O APIC (that's the 82093AA and cores integrated into various
2420 * chipsets). Under certain conditions a level-triggered interrupt is
2421 * erroneously delivered as edge-triggered one but the respective IRR
2422 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2423 * message but it will never arrive and further interrupts are blocked
2424 * from the source. The exact reason is so far unknown, but the
2425 * phenomenon was observed when two consecutive interrupt requests
2426 * from a given source get delivered to the same CPU and the source is
2427 * temporarily disabled in between.
2429 * A workaround is to simulate an EOI message manually. We achieve it
2430 * by setting the trigger mode to edge and then to level when the edge
2431 * trigger mode gets detected in the TMR of a local APIC for a
2432 * level-triggered interrupt. We mask the source for the time of the
2433 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2434 * The idea is from Manfred Spraul. --macro
2436 cfg = desc->chip_data;
2439 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2443 * We must acknowledge the irq before we move it or the acknowledge will
2444 * not propagate properly.
/* Now we can move and re-enable the irq */
2449 if (unlikely(do_unmask_irq)) {
2450 /* Only migrate the irq if the ack has been received.
2452 * On rare occasions the broadcast level triggered ack gets
2453 * delayed going to ioapics, and if we reprogram the
2454 * vector while Remote IRR is still set the irq will never
2457 * To prevent this scenario we read the Remote IRR bit
2458 * of the ioapic. This has two effects.
2459 * - On any sane system the read of the ioapic will
2460 * flush writes (and acks) going to the ioapic from
2462 * - We get to see if the ACK has actually been delivered.
2464 * Based on failed experiments of reprogramming the
2465 * ioapic entry from outside of irq context starting
2466 * with masking the ioapic entry and then polling until
2467 * Remote IRR was clear before reprogramming the
2468 * ioapic I don't trust the Remote IRR bit to be
 * completely accurate.
2471 * However there appears to be no other way to plug
2472 * this race, so if the Remote IRR bit is not
2473 * accurate and is causing problems then it is a hardware bug
2474 * and you can go talk to the chipset vendor about it.
2476 cfg = desc->chip_data;
2477 if (!io_apic_level_ack_pending(cfg))
2478 move_masked_irq(irq);
2479 unmask_IO_APIC_irq_desc(desc);
2482 #ifdef CONFIG_X86_32
2483 if (!(v & (1 << (i & 0x1f)))) {
2484 atomic_inc(&irq_mis_count);
2485 spin_lock(&ioapic_lock);
2486 __mask_and_edge_IO_APIC_irq(cfg);
2487 __unmask_and_level_IO_APIC_irq(cfg);
2488 spin_unlock(&ioapic_lock);
2493 static struct irq_chip ioapic_chip __read_mostly = {
2495 .startup = startup_ioapic_irq,
2496 .mask = mask_IO_APIC_irq,
2497 .unmask = unmask_IO_APIC_irq,
2498 .ack = ack_apic_edge,
2499 .eoi = ack_apic_level,
2501 .set_affinity = set_ioapic_affinity_irq,
2503 .retrigger = ioapic_retrigger_irq,
2506 #ifdef CONFIG_INTR_REMAP
2507 static struct irq_chip ir_ioapic_chip __read_mostly = {
2508 .name = "IR-IO-APIC",
2509 .startup = startup_ioapic_irq,
2510 .mask = mask_IO_APIC_irq,
2511 .unmask = unmask_IO_APIC_irq,
2512 .ack = ack_x2apic_edge,
2513 .eoi = ack_x2apic_level,
2515 .set_affinity = set_ir_ioapic_affinity_irq,
2517 .retrigger = ioapic_retrigger_irq,
2521 static inline void init_IO_APIC_traps(void)
2524 struct irq_desc *desc;
2525 struct irq_cfg *cfg;
2528 * NOTE! The local APIC isn't very good at handling
2529 * multiple interrupts at the same interrupt level.
2530 * As the interrupt level is determined by taking the
2531 * vector number and shifting that right by 4, we
2532 * want to spread these out a bit so that they don't
2533 * all fall in the same interrupt level.
2535 * Also, we've got to be careful not to trash gate
2536 * 0x80, because int 0x80 is hm, kind of importantish. ;)
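/*
 * Example of the level arithmetic: vector 0x31 belongs to interrupt
 * level 3 and vector 0x41 to level 4 (level = vector >> 4), which is
 * why the allocator staggers vectors instead of handing them out
 * consecutively.
 */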
2538 for_each_irq_desc(irq, desc) {
2542 cfg = desc->chip_data;
2543 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2545 * Hmm.. We don't have an entry for this,
2546 * so default to an old-fashioned 8259
2547 * interrupt if we can..
2549 if (irq < NR_IRQS_LEGACY)
2550 make_8259A_irq(irq);
2552 /* Strange. Oh, well.. */
2553 desc->chip = &no_irq_chip;
2559 * The local APIC irq-chip implementation:
2562 static void mask_lapic_irq(unsigned int irq)
2566 v = apic_read(APIC_LVT0);
2567 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2570 static void unmask_lapic_irq(unsigned int irq)
2574 v = apic_read(APIC_LVT0);
2575 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2578 static void ack_lapic_irq(unsigned int irq)
2583 static struct irq_chip lapic_chip __read_mostly = {
2584 .name = "local-APIC",
2585 .mask = mask_lapic_irq,
2586 .unmask = unmask_lapic_irq,
2587 .ack = ack_lapic_irq,
2590 static void lapic_register_intr(int irq, struct irq_desc *desc)
2592 desc->status &= ~IRQ_LEVEL;
2593 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2597 static void __init setup_nmi(void)
2600 * Dirty trick to enable the NMI watchdog ...
2601 * We put the 8259A master into AEOI mode and
2602 * unmask LVT0 as NMI on all local APICs.
2604 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2605 * is from Maciej W. Rozycki - so we do not have to EOI from
2606 * the NMI handler or the timer interrupt.
2608 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2610 enable_NMI_through_LVT0();
2612 apic_printk(APIC_VERBOSE, " done.\n");
2616 * This looks a bit hackish but it's about the only way of sending
2617 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2618 * not support the ExtINT mode, unfortunately. We need to send these
2619 * cycles as some i82489DX-based boards have glue logic that keeps the
2620 * 8259A interrupt line asserted until INTA. --macro
2622 static inline void __init unlock_ExtINT_logic(void)
2625 struct IO_APIC_route_entry entry0, entry1;
2626 unsigned char save_control, save_freq_select;
2628 pin = find_isa_irq_pin(8, mp_INT);
2633 apic = find_isa_irq_apic(8, mp_INT);
2639 entry0 = ioapic_read_entry(apic, pin);
2640 clear_IO_APIC_pin(apic, pin);
2642 memset(&entry1, 0, sizeof(entry1));
2644 entry1.dest_mode = 0; /* physical delivery */
2645 entry1.mask = 0; /* unmask IRQ now */
2646 entry1.dest = hard_smp_processor_id();
2647 entry1.delivery_mode = dest_ExtINT;
2648 entry1.polarity = entry0.polarity;
2652 ioapic_write_entry(apic, pin, entry1);
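/*
 * Program the RTC for a 1024 Hz periodic interrupt (rate select 6) and
 * enable it via RTC_PIE: every IRQ 8 assertion now flows through the
 * temporary ExtINT entry above and generates the INTA cycles we are
 * after. We poll RTC_INTR_FLAGS for RTC_PF before restoring the RTC
 * state and the original routing entry below.
 */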
2654 save_control = CMOS_READ(RTC_CONTROL);
2655 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2656 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2658 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2663 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2667 CMOS_WRITE(save_control, RTC_CONTROL);
2668 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2669 clear_IO_APIC_pin(apic, pin);
2671 ioapic_write_entry(apic, pin, entry0);
2674 static int disable_timer_pin_1 __initdata;
2675 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2676 static int __init disable_timer_pin_setup(char *arg)
2678 disable_timer_pin_1 = 1;
2681 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2683 int timer_through_8259 __initdata;
2686 * This code may look a bit paranoid, but it's supposed to cooperate with
2687 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2688 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2689 * fanatically on his truly buggy board.
2691 * FIXME: really need to revamp this for all platforms.
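/*
 * The probing ladder below tries, in order: IRQ0 directly through the
 * IO-APIC pin reported for it (apic1/pin1), then through the 8259A
 * cascade pin (apic2/pin2), then as a local APIC virtual-wire IRQ, and
 * finally as ExtINT. Only if all four fail do we panic and ask for an
 * apic=debug report.
 */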
2693 static inline void __init check_timer(void)
2695 struct irq_desc *desc = irq_to_desc(0);
2696 struct irq_cfg *cfg = desc->chip_data;
2697 int cpu = boot_cpu_id;
2698 int apic1, pin1, apic2, pin2;
2699 unsigned long flags;
2703 local_irq_save(flags);
2705 ver = apic_read(APIC_LVR);
2706 ver = GET_APIC_VERSION(ver);
2709 * get/set the timer IRQ vector:
2711 disable_8259A_irq(0);
2712 assign_irq_vector(0, cfg, TARGET_CPUS);
2715 * As IRQ0 is to be enabled in the 8259A, the virtual
2716 * wire has to be disabled in the local APIC. Also
2717 * timer interrupts need to be acknowledged manually in
2718 * the 8259A for the i82489DX when using the NMI
2719 * watchdog as that APIC treats NMIs as level-triggered.
2720 * The AEOI mode will finish them in the 8259A automatically.
2723 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2725 #ifdef CONFIG_X86_32
2726 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2729 pin1 = find_isa_irq_pin(0, mp_INT);
2730 apic1 = find_isa_irq_apic(0, mp_INT);
2731 pin2 = ioapic_i8259.pin;
2732 apic2 = ioapic_i8259.apic;
2734 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2735 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2736 cfg->vector, apic1, pin1, apic2, pin2);
2739 * Some BIOS writers are clueless and report the ExtINTA
2740 * I/O APIC input from the cascaded 8259A as the timer
2741 * interrupt input. So just in case, if only one pin
2742 * was found above, try it both directly and through the 8259A.
2746 #ifdef CONFIG_INTR_REMAP
2747 if (intr_remapping_enabled)
2748 panic("BIOS bug: timer not connected to IO-APIC");
2753 } else if (pin2 == -1) {
2760 * Ok, does IRQ0 through the IOAPIC work?
2763 add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
2764 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2766 unmask_IO_APIC_irq_desc(desc);
2767 if (timer_irq_works()) {
2768 if (nmi_watchdog == NMI_IO_APIC) {
2770 enable_8259A_irq(0);
2772 if (disable_timer_pin_1 > 0)
2773 clear_IO_APIC_pin(0, pin1);
2776 #ifdef CONFIG_INTR_REMAP
2777 if (intr_remapping_enabled)
2778 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2780 clear_IO_APIC_pin(apic1, pin1);
2782 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2783 "8254 timer not connected to IO-APIC\n");
2785 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2786 "(IRQ0) through the 8259A ...\n");
2787 apic_printk(APIC_QUIET, KERN_INFO
2788 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2790 * legacy devices should be connected to IO APIC #0
2792 replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
2793 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2794 unmask_IO_APIC_irq_desc(desc);
2795 enable_8259A_irq(0);
2796 if (timer_irq_works()) {
2797 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2798 timer_through_8259 = 1;
2799 if (nmi_watchdog == NMI_IO_APIC) {
2800 disable_8259A_irq(0);
2802 enable_8259A_irq(0);
2807 * Cleanup, just in case ...
2809 disable_8259A_irq(0);
2810 clear_IO_APIC_pin(apic2, pin2);
2811 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2814 if (nmi_watchdog == NMI_IO_APIC) {
2815 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2816 "through the IO-APIC - disabling NMI Watchdog!\n");
2817 nmi_watchdog = NMI_NONE;
2819 #ifdef CONFIG_X86_32
2823 apic_printk(APIC_QUIET, KERN_INFO
2824 "...trying to set up timer as Virtual Wire IRQ...\n");
2826 lapic_register_intr(0, desc);
2827 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2828 enable_8259A_irq(0);
2830 if (timer_irq_works()) {
2831 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2834 disable_8259A_irq(0);
2835 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2836 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2838 apic_printk(APIC_QUIET, KERN_INFO
2839 "...trying to set up timer as ExtINT IRQ...\n");
2843 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2845 unlock_ExtINT_logic();
2847 if (timer_irq_works()) {
2848 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2851 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2852 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2853 "report. Then try booting with the 'noapic' option.\n");
2855 local_irq_restore(flags);
2859 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2860 * to devices. However there may be an I/O APIC pin available for
2861 * this interrupt regardless. The pin may be left unconnected, but
2862 * typically it will be reused as an ExtINT cascade interrupt for
2863 * the master 8259A. In the MPS case such a pin will normally be
2864 * reported as an ExtINT interrupt in the MP table. With ACPI
2865 * there is no provision for ExtINT interrupts, and in the absence
2866 * of an override it would be treated as an ordinary ISA I/O APIC
2867 * interrupt, that is edge-triggered and unmasked by default. We
2868 * used to do this, but it caused problems on some systems because
2869 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2870 * the same ExtINT cascade interrupt to drive the local APIC of the
2871 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2872 * the I/O APIC in all cases now. No actual device should request
2873 * it anyway. --macro
2875 #define PIC_IRQS (1 << PIC_CASCADE_IR)
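/*
 * PIC_CASCADE_IR is 2, so PIC_IRQS is just bit 2: the assignment
 * io_apic_irqs = ~PIC_IRQS below routes every IRQ except the 8259A
 * cascade through the IO-APIC.
 */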
2877 void __init setup_IO_APIC(void)
2880 #ifdef CONFIG_X86_32
2884 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
2888 io_apic_irqs = ~PIC_IRQS;
2890 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2892 * Set up IO-APIC IRQ routing.
2894 #ifdef CONFIG_X86_32
2896 setup_ioapic_ids_from_mpc();
2899 setup_IO_APIC_irqs();
2900 init_IO_APIC_traps();
2905 * Called after all the initialization is done. If we didn't find any
2906 * APIC bugs then we can allow the modify fast path
2909 static int __init io_apic_bug_finalize(void)
2911 if (sis_apic_bug == -1)
2916 late_initcall(io_apic_bug_finalize);
2918 struct sysfs_ioapic_data {
2919 struct sys_device dev;
2920 struct IO_APIC_route_entry entry[0];
2922 static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
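/*
 * ioapic_suspend() snapshots every redirection table entry into the
 * per-IOAPIC buffer above; ioapic_resume() restores the APIC ID and
 * rewrites the entries, so the routing survives an IO-APIC that lost
 * power across the suspend.
 */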
2924 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2926 struct IO_APIC_route_entry *entry;
2927 struct sysfs_ioapic_data *data;
2930 data = container_of(dev, struct sysfs_ioapic_data, dev);
2931 entry = data->entry;
2932 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2933 *entry = ioapic_read_entry(dev->id, i);
2938 static int ioapic_resume(struct sys_device *dev)
2940 struct IO_APIC_route_entry *entry;
2941 struct sysfs_ioapic_data *data;
2942 unsigned long flags;
2943 union IO_APIC_reg_00 reg_00;
2946 data = container_of(dev, struct sysfs_ioapic_data, dev);
2947 entry = data->entry;
2949 spin_lock_irqsave(&ioapic_lock, flags);
2950 reg_00.raw = io_apic_read(dev->id, 0);
2951 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2952 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
2953 io_apic_write(dev->id, 0, reg_00.raw);
2955 spin_unlock_irqrestore(&ioapic_lock, flags);
2956 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2957 ioapic_write_entry(dev->id, i, entry[i]);
2962 static struct sysdev_class ioapic_sysdev_class = {
2964 .suspend = ioapic_suspend,
2965 .resume = ioapic_resume,
2968 static int __init ioapic_init_sysfs(void)
2970 struct sys_device *dev;
2973 error = sysdev_class_register(&ioapic_sysdev_class);
2977 for (i = 0; i < nr_ioapics; i++) {
2978 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2979 * sizeof(struct IO_APIC_route_entry);
2980 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
2981 if (!mp_ioapic_data[i]) {
2982 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2985 dev = &mp_ioapic_data[i]->dev;
2987 dev->cls = &ioapic_sysdev_class;
2988 error = sysdev_register(dev);
2990 kfree(mp_ioapic_data[i]);
2991 mp_ioapic_data[i] = NULL;
2992 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3000 device_initcall(ioapic_init_sysfs);
3003 * Dynamic irq allocation and deallocation
3005 unsigned int create_irq_nr(unsigned int irq_want)
3007 /* Allocate an unused irq */
3010 unsigned long flags;
3011 struct irq_cfg *cfg_new = NULL;
3012 int cpu = boot_cpu_id;
3013 struct irq_desc *desc_new = NULL;
3016 spin_lock_irqsave(&vector_lock, flags);
3017 for (new = irq_want; new < NR_IRQS; new++) {
3018 if (platform_legacy_irq(new))
3021 desc_new = irq_to_desc_alloc_cpu(new, cpu);
3023 printk(KERN_INFO "cannot get irq_desc for %d\n", new);
3026 cfg_new = desc_new->chip_data;
3028 if (cfg_new->vector != 0)
3030 if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
3034 spin_unlock_irqrestore(&vector_lock, flags);
3037 dynamic_irq_init(irq);
3038 /* restore it, in case dynamic_irq_init clears it */
3040 desc_new->chip_data = cfg_new;
3045 static int nr_irqs_gsi = NR_IRQS_LEGACY;
3046 int create_irq(void)
3048 unsigned int irq_want;
3051 irq_want = nr_irqs_gsi;
3052 irq = create_irq_nr(irq_want);
3060 void destroy_irq(unsigned int irq)
3062 unsigned long flags;
3063 struct irq_cfg *cfg;
3064 struct irq_desc *desc;
3066 /* store it, in case dynamic_irq_cleanup clears it */
3067 desc = irq_to_desc(irq);
3068 cfg = desc->chip_data;
3069 dynamic_irq_cleanup(irq);
3070 /* connect back irq_cfg */
3072 desc->chip_data = cfg;
3074 #ifdef CONFIG_INTR_REMAP
3077 spin_lock_irqsave(&vector_lock, flags);
3078 __clear_irq_vector(irq, cfg);
3079 spin_unlock_irqrestore(&vector_lock, flags);
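/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 *
 *	int irq = create_irq();
 *
 *	if (irq >= 0 && request_irq(irq, my_handler, 0, "my-dev", NULL))
 *		destroy_irq(irq);
 *
 * create_irq() hands back a vector-backed irq above the GSI range and
 * destroy_irq() returns it to the pool; my_handler is a stand-in name.
 */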
3083 * MSI message composition
3085 #ifdef CONFIG_PCI_MSI
3086 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
3088 struct irq_cfg *cfg;
3095 err = assign_irq_vector(irq, cfg, &tmp);
3099 cpus_and(tmp, cfg->domain, tmp);
3100 dest = cpu_mask_to_apicid(&tmp);
3102 #ifdef CONFIG_INTR_REMAP
3103 if (irq_remapped(irq)) {
3108 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3109 BUG_ON(ir_index == -1);
3111 memset(&irte, 0, sizeof(irte));
3114 irte.dst_mode = INT_DEST_MODE;
3115 irte.trigger_mode = 0; /* edge */
3116 irte.dlvry_mode = INT_DELIVERY_MODE;
3117 irte.vector = cfg->vector;
3118 irte.dest_id = IRTE_DEST(dest);
3120 modify_irte(irq, &irte);
3122 msg->address_hi = MSI_ADDR_BASE_HI;
3123 msg->data = sub_handle;
3124 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3126 MSI_ADDR_IR_INDEX1(ir_index) |
3127 MSI_ADDR_IR_INDEX2(ir_index);
3131 msg->address_hi = MSI_ADDR_BASE_HI;
3134 ((INT_DEST_MODE == 0) ?
3135 MSI_ADDR_DEST_MODE_PHYSICAL:
3136 MSI_ADDR_DEST_MODE_LOGICAL) |
3137 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3138 MSI_ADDR_REDIRECTION_CPU:
3139 MSI_ADDR_REDIRECTION_LOWPRI) |
3140 MSI_ADDR_DEST_ID(dest);
3143 MSI_DATA_TRIGGER_EDGE |
3144 MSI_DATA_LEVEL_ASSERT |
3145 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3146 MSI_DATA_DELIVERY_FIXED:
3147 MSI_DATA_DELIVERY_LOWPRI) |
3148 MSI_DATA_VECTOR(cfg->vector);
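/*
 * The non-remapped message above follows the x86 MSI format: address_lo
 * selects physical vs logical destination mode plus the redirection
 * hint and carries the target APIC ID in bits 12-19, while data carries
 * the delivery mode and vector. MSIs bypass the IO-APIC entirely and
 * are edge-triggered, hence the plain ack_apic_edge in msi_chip below.
 */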
3154 static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3156 struct irq_desc *desc = irq_to_desc(irq);
3157 struct irq_cfg *cfg;
3162 if (!cpumask_intersects(mask, cpu_online_mask))
3165 cfg = desc->chip_data;
3166 if (assign_irq_vector(irq, cfg, mask))
3169 set_extra_move_desc(desc, mask);
3171 cpumask_and(&tmp, &cfg->domain, mask);
3172 dest = cpu_mask_to_apicid(&tmp);
3174 read_msi_msg_desc(desc, &msg);
3176 msg.data &= ~MSI_DATA_VECTOR_MASK;
3177 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3178 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3179 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3181 write_msi_msg_desc(desc, &msg);
3182 cpumask_copy(&desc->affinity, mask);
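/*
 * The sequence above is the canonical MSI retarget pattern repeated by
 * the DMAR and HPET variants below: assign a vector for the new mask,
 * patch the vector and destination ID into the message, write it back,
 * then record the new affinity.
 */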
3184 #ifdef CONFIG_INTR_REMAP
3186 * Migrate the MSI irq to another cpumask. This migration is
3187 * done in the process context using interrupt-remapping hardware.
3190 ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3192 struct irq_desc *desc = irq_to_desc(irq);
3193 struct irq_cfg *cfg;
3195 cpumask_t tmp, cleanup_mask;
3198 if (!cpumask_intersects(mask, cpu_online_mask))
3201 if (get_irte(irq, &irte))
3204 cfg = desc->chip_data;
3205 if (assign_irq_vector(irq, cfg, mask))
3208 set_extra_move_desc(desc, mask);
3210 cpumask_and(&tmp, &cfg->domain, mask);
3211 dest = cpu_mask_to_apicid(&tmp);
3213 irte.vector = cfg->vector;
3214 irte.dest_id = IRTE_DEST(dest);
3217 * atomically update the IRTE with the new destination and vector.
3219 modify_irte(irq, &irte);
3222 * After this point, all the interrupts will start arriving
3223 * at the new destination. So, time to cleanup the previous
3224 * vector allocation.
3226 if (cfg->move_in_progress) {
3227 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
3228 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3229 send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3230 cfg->move_in_progress = 0;
3233 cpumask_copy(&desc->affinity, mask);
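/*
 * Note that with remapping only the IRTE changed: the MSI address/data
 * programmed into the device still point at the same table entry, so
 * no device registers need to be touched during migration.
 */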
3237 #endif /* CONFIG_SMP */
3240 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3241 * which implement the MSI or MSI-X Capability Structure.
3243 static struct irq_chip msi_chip = {
3245 .unmask = unmask_msi_irq,
3246 .mask = mask_msi_irq,
3247 .ack = ack_apic_edge,
3249 .set_affinity = set_msi_irq_affinity,
3251 .retrigger = ioapic_retrigger_irq,
3254 #ifdef CONFIG_INTR_REMAP
3255 static struct irq_chip msi_ir_chip = {
3256 .name = "IR-PCI-MSI",
3257 .unmask = unmask_msi_irq,
3258 .mask = mask_msi_irq,
3259 .ack = ack_x2apic_edge,
3261 .set_affinity = ir_set_msi_irq_affinity,
3263 .retrigger = ioapic_retrigger_irq,
3267 * Map the PCI dev to the corresponding remapping hardware unit
3268 * and allocate 'nvec' consecutive interrupt-remapping table entries
3271 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3273 struct intel_iommu *iommu;
3276 iommu = map_dev_to_ir(dev);
3279 "Unable to map PCI %s to iommu\n", pci_name(dev));
3283 index = alloc_irte(iommu, irq, nvec);
3286 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3294 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3299 ret = msi_compose_msg(dev, irq, &msg);
3303 set_irq_msi(irq, msidesc);
3304 write_msi_msg(irq, &msg);
3306 #ifdef CONFIG_INTR_REMAP
3307 if (irq_remapped(irq)) {
3308 struct irq_desc *desc = irq_to_desc(irq);
3310 * irq migration in process context
3312 desc->status |= IRQ_MOVE_PCNTXT;
3313 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3316 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3318 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
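/*
 * A driver-side usage sketch (hypothetical names, not from this file):
 *
 *	if (!pci_enable_msi(pdev))
 *		err = request_irq(pdev->irq, my_msi_handler, 0,
 *				  "my-dev", my_dev);
 *
 * pci_enable_msi() reaches arch_setup_msi_irq() below, which allocates
 * the irq and composes/writes the message via setup_msi_irq() above.
 */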
3323 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
3327 unsigned int irq_want;
3329 irq_want = nr_irqs_gsi;
3330 irq = create_irq_nr(irq_want);
3334 #ifdef CONFIG_INTR_REMAP
3335 if (!intr_remapping_enabled)
3338 ret = msi_alloc_irte(dev, irq, 1);
3343 ret = setup_msi_irq(dev, msidesc, irq);
3350 #ifdef CONFIG_INTR_REMAP
3357 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3360 int ret, sub_handle;
3361 struct msi_desc *msidesc;
3362 unsigned int irq_want;
3364 #ifdef CONFIG_INTR_REMAP
3365 struct intel_iommu *iommu = NULL;
3369 irq_want = nr_irqs_gsi;
3371 list_for_each_entry(msidesc, &dev->msi_list, list) {
3372 irq = create_irq_nr(irq_want);
3376 #ifdef CONFIG_INTR_REMAP
3377 if (!intr_remapping_enabled)
3382 * allocate the consecutive block of IRTE's
3385 index = msi_alloc_irte(dev, irq, nvec);
3391 iommu = map_dev_to_ir(dev);
3397 * set up the mapping between the irq and the IRTE
3398 * base index, with the sub_handle pointing to the
3399 * appropriate interrupt remap table entry.
3401 set_irte_irq(irq, iommu, index, sub_handle);
3405 ret = setup_msi_irq(dev, msidesc, irq);
3417 void arch_teardown_msi_irq(unsigned int irq)
3424 static void dmar_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
3426 struct irq_desc *desc = irq_to_desc(irq);
3427 struct irq_cfg *cfg;
3432 if (!cpumask_intersects(mask, cpu_online_mask))
3435 cfg = desc->chip_data;
3436 if (assign_irq_vector(irq, cfg, mask))
3439 set_extra_move_desc(desc, mask);
3441 cpumask_and(&tmp, &cfg->domain, mask);
3442 dest = cpu_mask_to_apicid(&tmp);
3444 dmar_msi_read(irq, &msg);
3446 msg.data &= ~MSI_DATA_VECTOR_MASK;
3447 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3448 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3449 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3451 dmar_msi_write(irq, &msg);
3452 cpumask_copy(&desc->affinity, mask);
3455 #endif /* CONFIG_SMP */
3457 struct irq_chip dmar_msi_type = {
3459 .unmask = dmar_msi_unmask,
3460 .mask = dmar_msi_mask,
3461 .ack = ack_apic_edge,
3463 .set_affinity = dmar_msi_set_affinity,
3465 .retrigger = ioapic_retrigger_irq,
3468 int arch_setup_dmar_msi(unsigned int irq)
3473 ret = msi_compose_msg(NULL, irq, &msg);
3476 dmar_msi_write(irq, &msg);
3477 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3483 #ifdef CONFIG_HPET_TIMER
3486 static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
3488 struct irq_desc *desc = irq_to_desc(irq);
3489 struct irq_cfg *cfg;
3494 if (!cpumask_intersects(mask, cpu_online_mask))
3497 cfg = desc->chip_data;
3498 if (assign_irq_vector(irq, cfg, mask))
3501 set_extra_move_desc(desc, mask);
3503 cpumask_and(&tmp, &cfg->domain, mask);
3504 dest = cpu_mask_to_apicid(&tmp);
3506 hpet_msi_read(irq, &msg);
3508 msg.data &= ~MSI_DATA_VECTOR_MASK;
3509 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3510 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3511 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3513 hpet_msi_write(irq, &msg);
3514 cpumask_copy(&desc->affinity, mask);
3517 #endif /* CONFIG_SMP */
3519 struct irq_chip hpet_msi_type = {
3521 .unmask = hpet_msi_unmask,
3522 .mask = hpet_msi_mask,
3523 .ack = ack_apic_edge,
3525 .set_affinity = hpet_msi_set_affinity,
3527 .retrigger = ioapic_retrigger_irq,
3530 int arch_setup_hpet_msi(unsigned int irq)
3535 ret = msi_compose_msg(NULL, irq, &msg);
3539 hpet_msi_write(irq, &msg);
3540 set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
3547 #endif /* CONFIG_PCI_MSI */
3549 * Hypertransport interrupt support
3551 #ifdef CONFIG_HT_IRQ
3555 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3557 struct ht_irq_msg msg;
3558 fetch_ht_irq_msg(irq, &msg);
3560 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3561 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3563 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3564 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3566 write_ht_irq_msg(irq, &msg);
3569 static void set_ht_irq_affinity(unsigned int irq, const cpumask_t *mask)
3571 struct irq_desc *desc = irq_to_desc(irq);
3572 struct irq_cfg *cfg;
3576 if (!cpumask_intersects(mask, cpu_online_mask))
3579 cfg = desc->chip_data;
3580 if (assign_irq_vector(irq, cfg, mask))
3583 set_extra_move_desc(desc, mask);
3585 cpumask_and(&tmp, &cfg->domain, mask);
3586 dest = cpu_mask_to_apicid(&tmp);
3588 target_ht_irq(irq, dest, cfg->vector);
3589 cpumask_copy(&desc->affinity, mask);
3594 static struct irq_chip ht_irq_chip = {
3596 .mask = mask_ht_irq,
3597 .unmask = unmask_ht_irq,
3598 .ack = ack_apic_edge,
3600 .set_affinity = set_ht_irq_affinity,
3602 .retrigger = ioapic_retrigger_irq,
3605 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3607 struct irq_cfg *cfg;
3612 err = assign_irq_vector(irq, cfg, TARGET_CPUS);
3614 struct ht_irq_msg msg;
3617 cpus_and(tmp, cfg->domain, tmp);
3618 dest = cpu_mask_to_apicid(&tmp);
3620 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3624 HT_IRQ_LOW_DEST_ID(dest) |
3625 HT_IRQ_LOW_VECTOR(cfg->vector) |
3626 ((INT_DEST_MODE == 0) ?
3627 HT_IRQ_LOW_DM_PHYSICAL :
3628 HT_IRQ_LOW_DM_LOGICAL) |
3629 HT_IRQ_LOW_RQEOI_EDGE |
3630 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3631 HT_IRQ_LOW_MT_FIXED :
3632 HT_IRQ_LOW_MT_ARBITRATED) |
3633 HT_IRQ_LOW_IRQ_MASKED;
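/*
 * As with MSI, the destination and vector are encoded in the message
 * itself. The entry is composed with HT_IRQ_LOW_IRQ_MASKED set and is
 * only unmasked later through the irq chip (unmask_ht_irq).
 */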
3635 write_ht_irq_msg(irq, &msg);
3637 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3638 handle_edge_irq, "edge");
3640 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3644 #endif /* CONFIG_HT_IRQ */
3646 #ifdef CONFIG_X86_64
3648 * Re-target the irq to the specified CPU and enable the specified MMR located
3649 * on the specified blade to allow the sending of MSIs to the specified CPU.
3651 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3652 unsigned long mmr_offset)
3654 const cpumask_t *eligible_cpu = &cpumask_of_cpu(cpu);
3655 struct irq_cfg *cfg;
3657 unsigned long mmr_value;
3658 struct uv_IO_APIC_route_entry *entry;
3659 unsigned long flags;
3664 err = assign_irq_vector(irq, cfg, eligible_cpu);
3668 spin_lock_irqsave(&vector_lock, flags);
3669 set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
3671 spin_unlock_irqrestore(&vector_lock, flags);
3674 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3675 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3677 entry->vector = cfg->vector;
3678 entry->delivery_mode = INT_DELIVERY_MODE;
3679 entry->dest_mode = INT_DEST_MODE;
3680 entry->polarity = 0;
3683 entry->dest = cpu_mask_to_apicid(eligible_cpu);
3685 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3686 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3692 * Disable the specified MMR located on the specified blade so that MSIs are
3693 * no longer allowed to be sent.
3695 void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
3697 unsigned long mmr_value;
3698 struct uv_IO_APIC_route_entry *entry;
3702 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3703 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3707 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3708 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3710 #endif /* CONFIG_X86_64 */
3712 int __init io_apic_get_redir_entries(int ioapic)
3714 union IO_APIC_reg_01 reg_01;
3715 unsigned long flags;
3717 spin_lock_irqsave(&ioapic_lock, flags);
3718 reg_01.raw = io_apic_read(ioapic, 1);
3719 spin_unlock_irqrestore(&ioapic_lock, flags);
3721 return reg_01.bits.entries;
3724 void __init probe_nr_irqs_gsi(void)
3729 for (idx = 0; idx < nr_ioapics; idx++)
3730 nr += io_apic_get_redir_entries(idx) + 1;
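/*
 * io_apic_get_redir_entries() returns the highest redirection entry
 * index, hence the "+ 1" above: a 24-pin IO-APIC reports 23 and
 * contributes 24 GSIs to the total.
 */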
3732 if (nr > nr_irqs_gsi)
3736 /* --------------------------------------------------------------------------
3737 ACPI-based IOAPIC Configuration
3738 -------------------------------------------------------------------------- */
3742 #ifdef CONFIG_X86_32
3743 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3745 union IO_APIC_reg_00 reg_00;
3746 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3748 unsigned long flags;
3752 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3753 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3754 * support up to 16 on one shared APIC bus.
3756 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3757 * advantage of new APIC bus architecture.
3760 if (physids_empty(apic_id_map))
3761 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
3763 spin_lock_irqsave(&ioapic_lock, flags);
3764 reg_00.raw = io_apic_read(ioapic, 0);
3765 spin_unlock_irqrestore(&ioapic_lock, flags);
3767 if (apic_id >= get_physical_broadcast()) {
3768 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3769 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3770 apic_id = reg_00.bits.ID;
3774 * Every APIC in a system must have a unique ID or we get lots of nice
3775 * 'stuck on smp_invalidate_needed IPI wait' messages.
3777 if (check_apicid_used(apic_id_map, apic_id)) {
3779 for (i = 0; i < get_physical_broadcast(); i++) {
3780 if (!check_apicid_used(apic_id_map, i))
3784 if (i == get_physical_broadcast())
3785 panic("Max apic_id exceeded!\n");
3787 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3788 "trying %d\n", ioapic, apic_id, i);
3793 tmp = apicid_to_cpu_present(apic_id);
3794 physids_or(apic_id_map, apic_id_map, tmp);
3796 if (reg_00.bits.ID != apic_id) {
3797 reg_00.bits.ID = apic_id;
3799 spin_lock_irqsave(&ioapic_lock, flags);
3800 io_apic_write(ioapic, 0, reg_00.raw);
3801 reg_00.raw = io_apic_read(ioapic, 0);
3802 spin_unlock_irqrestore(&ioapic_lock, flags);
3805 if (reg_00.bits.ID != apic_id) {
3806 printk(KERN_ERR "IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3811 apic_printk(APIC_VERBOSE, KERN_INFO
3812 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3817 int __init io_apic_get_version(int ioapic)
3819 union IO_APIC_reg_01 reg_01;
3820 unsigned long flags;
3822 spin_lock_irqsave(&ioapic_lock, flags);
3823 reg_01.raw = io_apic_read(ioapic, 1);
3824 spin_unlock_irqrestore(&ioapic_lock, flags);
3826 return reg_01.bits.version;
3830 int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
3832 struct irq_desc *desc;
3833 struct irq_cfg *cfg;
3834 int cpu = boot_cpu_id;
3836 if (!IO_APIC_IRQ(irq)) {
3837 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3842 desc = irq_to_desc_alloc_cpu(irq, cpu);
3844 printk(KERN_INFO "cannot get irq_desc %d\n", irq);
3849 * IRQs < 16 are already in the irq_2_pin[] map
3851 if (irq >= NR_IRQS_LEGACY) {
3852 cfg = desc->chip_data;
3853 add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
3856 setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
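/*
 * A sketch of the expected ACPI-side call (values hypothetical): a
 * typical level-triggered, active-low PCI INTx line would be routed
 * with
 *
 *	io_apic_set_pci_routing(ioapic, pin, gsi, 1, 1);
 *
 * where triggering == 1 selects level and polarity == 1 active low.
 */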
3862 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3866 if (skip_ioapic_setup)
3869 for (i = 0; i < mp_irq_entries; i++)
3870 if (mp_irqs[i].mp_irqtype == mp_INT &&
3871 mp_irqs[i].mp_srcbusirq == bus_irq)
3873 if (i >= mp_irq_entries)
3876 *trigger = irq_trigger(i);
3877 *polarity = irq_polarity(i);
3881 #endif /* CONFIG_ACPI */
3884 * This function is currently only a helper for the i386 smp boot process where
3885 * we need to reprogram the ioredtbls to cater for the cpus which have come online,
3886 * so the mask in all cases should simply be TARGET_CPUS.
3889 void __init setup_ioapic_dest(void)
3891 int pin, ioapic, irq, irq_entry;
3892 struct irq_desc *desc;
3893 struct irq_cfg *cfg;
3894 const cpumask_t *mask;
3896 if (skip_ioapic_setup == 1)
3899 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3900 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3901 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3902 if (irq_entry == -1)
3904 irq = pin_2_irq(irq_entry, ioapic, pin);
3906 /* setup_IO_APIC_irqs could fail to get vector for some device
3907 * when you have too many devices, because at that time only boot cpu is online.
3910 desc = irq_to_desc(irq);
3911 cfg = desc->chip_data;
3913 setup_IO_APIC_irq(ioapic, pin, irq, desc,
3914 irq_trigger(irq_entry),
3915 irq_polarity(irq_entry));
3921 * Honour affinities which have been set in early boot
3924 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
3925 mask = &desc->affinity;
3929 #ifdef CONFIG_INTR_REMAP
3930 if (intr_remapping_enabled)
3931 set_ir_ioapic_affinity_irq_desc(desc, mask);
3934 set_ioapic_affinity_irq_desc(desc, mask);
3941 #define IOAPIC_RESOURCE_NAME_SIZE 11
3943 static struct resource *ioapic_resources;
3945 static struct resource * __init ioapic_setup_resources(void)
3948 struct resource *res;
3952 if (nr_ioapics <= 0)
3955 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3958 mem = alloc_bootmem(n);
3962 mem += sizeof(struct resource) * nr_ioapics;
3964 for (i = 0; i < nr_ioapics; i++) {
3966 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3967 sprintf(mem, "IOAPIC %u", i);
3968 mem += IOAPIC_RESOURCE_NAME_SIZE;
3972 ioapic_resources = res;
3977 void __init ioapic_init_mappings(void)
3979 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3980 struct resource *ioapic_res;
3983 ioapic_res = ioapic_setup_resources();
3984 for (i = 0; i < nr_ioapics; i++) {
3985 if (smp_found_config) {
3986 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3987 #ifdef CONFIG_X86_32
3990 "WARNING: bogus zero IO-APIC "
3991 "address found in MPTABLE, "
3992 "disabling IO/APIC support!\n");
3993 smp_found_config = 0;
3994 skip_ioapic_setup = 1;
3995 goto fake_ioapic_page;
3999 #ifdef CONFIG_X86_32
4002 ioapic_phys = (unsigned long)
4003 alloc_bootmem_pages(PAGE_SIZE);
4004 ioapic_phys = __pa(ioapic_phys);
4006 set_fixmap_nocache(idx, ioapic_phys);
4007 apic_printk(APIC_VERBOSE,
4008 "mapped IOAPIC to %08lx (%08lx)\n",
4009 __fix_to_virt(idx), ioapic_phys);
4012 if (ioapic_res != NULL) {
4013 ioapic_res->start = ioapic_phys;
4014 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
4020 static int __init ioapic_insert_resources(void)
4023 struct resource *r = ioapic_resources;
4027 "IO APIC resources couldn't be allocated.\n");
4031 for (i = 0; i < nr_ioapics; i++) {
4032 insert_resource(&iomem_resource, r);
4039 /* Insert the IO APIC resources after PCI initialization has occurred to handle
4040 * IO APICS that are mapped in on a BAR in PCI space. */
4041 late_initcall(ioapic_insert_resources);