2 * Intel IO-APIC support for multi-Pentium hosts.
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/acpi.h>
31 #include <linux/sysdev.h>
32 #include <linux/msi.h>
33 #include <linux/htirq.h>
34 #include <linux/dmar.h>
35 #include <linux/jiffies.h>
37 #include <acpi/acpi_bus.h>
39 #include <linux/bootmem.h>
46 #include <asm/proto.h>
49 #include <asm/i8259.h>
51 #include <asm/msidef.h>
52 #include <asm/hypertransport.h>
53 #include <asm/irq_remapping.h>
56 #include <mach_apic.h>
58 #define __apicdebuginit(type) static type __init
65 struct irq_pin_list *irq_2_pin;
68 unsigned move_cleanup_count;
70 u8 move_in_progress : 1;
73 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
74 static struct irq_cfg irq_cfg_legacy[] __initdata = {
75 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
76 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
77 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
78 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
79 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
80 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
81 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
82 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
83 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
84 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
85 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
86 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
87 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
88 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
89 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
90 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
93 static struct irq_cfg irq_cfg_init = { .irq = -1U, };
94 /* needs to be bigger than the size of irq_cfg_legacy */
95 static int nr_irq_cfg = 32;
97 static int __init parse_nr_irq_cfg(char *arg)
100 nr_irq_cfg = simple_strtoul(arg, NULL, 0);
107 early_param("nr_irq_cfg", parse_nr_irq_cfg);
109 static void init_one_irq_cfg(struct irq_cfg *cfg)
111 memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
114 static void __init init_work(void *data)
116 struct dyn_array *da = data;
122 memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
124 i = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
125 for (; i < *da->nr; i++)
126 init_one_irq_cfg(&cfg[i]);
128 for (i = 1; i < *da->nr; i++)
129 cfg[i-1].next = &cfg[i];
132 #define for_each_irq_cfg(cfg) \
133 for (cfg = irq_cfgx; cfg && cfg->irq != -1U; cfg = cfg->next)
135 static struct irq_cfg *irq_cfgx;
136 DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
138 static struct irq_cfg *irq_cfg(unsigned int irq)
158 static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
160 struct irq_cfg *cfg, *cfg_pri;
166 cfg_pri = cfg = &irq_cfgx[0];
171 if (cfg->irq == -1U) {
181 * we ran out of pre-allocated ones, allocate more
183 printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);
186 cfg = kzalloc(sizeof(struct irq_cfg)*nr_irq_cfg, GFP_ATOMIC);
188 cfg = __alloc_bootmem_nopanic(sizeof(struct irq_cfg)*nr_irq_cfg, PAGE_SIZE, 0);
191 panic("please boot with nr_irq_cfg= %d\n", count * 2);
193 for (i = 0; i < nr_irq_cfg; i++)
194 init_one_irq_cfg(&cfg[i]);
196 for (i = 1; i < nr_irq_cfg; i++)
197 cfg[i-1].next = &cfg[i];
205 static int assign_irq_vector(int irq, cpumask_t mask);
207 int first_system_vector = 0xfe;
209 char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
211 int sis_apic_bug; /* not actually supported, dummy for compile */
213 static int no_timer_check;
215 static int disable_timer_pin_1 __initdata;
217 int timer_through_8259 __initdata;
219 /* Where, if anywhere, the i8259 is connected in external int mode */
220 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
222 static DEFINE_SPINLOCK(ioapic_lock);
223 static DEFINE_SPINLOCK(vector_lock);
226 * # of IRQ routing registers
228 int nr_ioapic_registers[MAX_IO_APICS];
230 /* I/O APIC RTE contents at the OS boot up */
231 struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
233 /* I/O APIC entries */
234 struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
237 /* MP IRQ source entries */
238 struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
240 /* # of MP IRQ source entries */
243 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
246 * Rough estimate of how many shared IRQs there are; can
247 * be changed at any time.
253 * This is performance-critical, we want to do it O(1)
255 * the indexing order of this array favors 1:1 mappings
256 * between pins and IRQs.
259 struct irq_pin_list {
261 struct irq_pin_list *next;
264 static struct irq_pin_list *irq_2_pin_head;
265 /* fill one page ? */
266 static int nr_irq_2_pin = 0x100;
267 static struct irq_pin_list *irq_2_pin_ptr;
268 static void __init irq_2_pin_init_work(void *data)
270 struct dyn_array *da = data;
271 struct irq_pin_list *pin;
276 for (i = 1; i < *da->nr; i++)
277 pin[i-1].next = &pin[i];
279 irq_2_pin_ptr = &pin[0];
281 DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
283 static struct irq_pin_list *get_one_free_irq_2_pin(void)
285 struct irq_pin_list *pin;
291 irq_2_pin_ptr = pin->next;
297 * we ran out of pre-allocated ones, allocate more
299 printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
302 pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
305 pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
306 nr_irq_2_pin, PAGE_SIZE, 0);
309 panic("can not get more irq_2_pin\n");
311 for (i = 1; i < nr_irq_2_pin; i++)
312 pin[i-1].next = &pin[i];
314 irq_2_pin_ptr = pin->next;
322 unsigned int unused[3];
326 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
328 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
329 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
332 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
334 struct io_apic __iomem *io_apic = io_apic_base(apic);
335 writel(reg, &io_apic->index);
336 return readl(&io_apic->data);
339 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
341 struct io_apic __iomem *io_apic = io_apic_base(apic);
342 writel(reg, &io_apic->index);
343 writel(value, &io_apic->data);
347 * Re-write a value: to be used for read-modify-write
348 * cycles where the read already set up the index register.
350 static inline void io_apic_modify(unsigned int apic, unsigned int value)
352 struct io_apic __iomem *io_apic = io_apic_base(apic);
353 writel(value, &io_apic->data);
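/*
 * A minimal illustrative sketch (not part of this driver): the helpers
 * above wrap the IO-APIC's indirect register interface - the register
 * number goes into ->index and the value is then accessed through
 * ->data.  A read-modify-write of the low RTE word of pin 3 on
 * IO-APIC 0 could look like this; __example_mask_pin is a hypothetical
 * name, and callers would normally do this under ioapic_lock.
 */
static inline void __example_mask_pin(void)
{
	unsigned int low;

	low = io_apic_read(0, 0x10 + 3 * 2);	/* selects the register and reads ->data */
	low |= IO_APIC_REDIR_MASKED;		/* set the mask bit in the low word */
	io_apic_modify(0, low);			/* data-only write; index register still points at it */
}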
356 static bool io_apic_level_ack_pending(unsigned int irq)
358 struct irq_pin_list *entry;
360 struct irq_cfg *cfg = irq_cfg(irq);
362 spin_lock_irqsave(&ioapic_lock, flags);
363 entry = cfg->irq_2_pin;
371 reg = io_apic_read(entry->apic, 0x10 + pin*2);
372 /* Is the remote IRR bit set? */
373 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
374 spin_unlock_irqrestore(&ioapic_lock, flags);
381 spin_unlock_irqrestore(&ioapic_lock, flags);
387 * Synchronize the IO-APIC and the CPU by doing
388 * a dummy read from the IO-APIC
390 static inline void io_apic_sync(unsigned int apic)
392 struct io_apic __iomem *io_apic = io_apic_base(apic);
393 readl(&io_apic->data);
396 #define __DO_ACTION(R, ACTION, FINAL) \
400 struct irq_cfg *cfg; \
401 struct irq_pin_list *entry; \
403 BUG_ON(irq >= nr_irqs); \
404 cfg = irq_cfg(irq); \
405 entry = cfg->irq_2_pin; \
411 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
413 io_apic_modify(entry->apic, reg); \
417 entry = entry->next; \
422 struct { u32 w1, w2; };
423 struct IO_APIC_route_entry entry;
426 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
428 union entry_union eu;
430 spin_lock_irqsave(&ioapic_lock, flags);
431 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
432 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
433 spin_unlock_irqrestore(&ioapic_lock, flags);
438 * When we write a new IO APIC routing entry, we need to write the high
439 * word first! If the mask bit in the low word is clear, we will enable
440 * the interrupt, and we need to make sure the entry is fully populated
441 * before that happens.
444 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
446 union entry_union eu;
448 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
449 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
452 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
455 spin_lock_irqsave(&ioapic_lock, flags);
456 __ioapic_write_entry(apic, pin, e);
457 spin_unlock_irqrestore(&ioapic_lock, flags);
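/*
 * Illustrative usage sketch of the accessors above (__example_set_level
 * is a hypothetical helper, not used elsewhere): read an RTE, adjust a
 * field, and write it back.  ioapic_write_entry() takes ioapic_lock and
 * keeps the high-word-first ordering documented above.
 */
static void __maybe_unused __example_set_level(int apic, int pin)
{
	struct IO_APIC_route_entry e = ioapic_read_entry(apic, pin);

	e.trigger = 1;				/* level triggered */
	ioapic_write_entry(apic, pin, e);
}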
461 * When we mask an IO APIC routing entry, we need to write the low
462 * word first, in order to set the mask bit before we change the high word.
465 static void ioapic_mask_entry(int apic, int pin)
468 union entry_union eu = { .entry.mask = 1 };
470 spin_lock_irqsave(&ioapic_lock, flags);
471 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
472 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
473 spin_unlock_irqrestore(&ioapic_lock, flags);
477 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
481 struct irq_pin_list *entry;
483 BUG_ON(irq >= nr_irqs);
485 entry = cfg->irq_2_pin;
495 * With interrupt-remapping, destination information comes
496 * from interrupt-remapping table entry.
498 if (!irq_remapped(irq))
499 io_apic_write(apic, 0x11 + pin*2, dest);
500 reg = io_apic_read(apic, 0x10 + pin*2);
501 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
503 io_apic_modify(apic, reg);
510 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
512 struct irq_cfg *cfg = irq_cfg(irq);
516 struct irq_desc *desc;
518 cpus_and(tmp, mask, cpu_online_map);
522 if (assign_irq_vector(irq, mask))
525 cpus_and(tmp, cfg->domain, mask);
526 dest = cpu_mask_to_apicid(tmp);
529 * Only the high 8 bits are valid.
531 dest = SET_APIC_LOGICAL_ID(dest);
533 desc = irq_to_desc(irq);
534 spin_lock_irqsave(&ioapic_lock, flags);
535 __target_IO_APIC_irq(irq, dest, cfg->vector);
536 desc->affinity = mask;
537 spin_unlock_irqrestore(&ioapic_lock, flags);
542 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
543 * shared ISA-space IRQs, so we have to support them. We are super
544 * fast in the common case, and fast for shared ISA-space IRQs.
546 int first_free_entry;
547 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
550 struct irq_pin_list *entry;
552 BUG_ON(irq >= nr_irqs);
553 /* first reference to this irq's irq_cfg, so allocate a new one */
554 cfg = irq_cfg_alloc(irq);
555 entry = cfg->irq_2_pin;
557 entry = get_one_free_irq_2_pin();
558 cfg->irq_2_pin = entry;
561 printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
565 while (entry->next) {
566 /* not again, please */
567 if (entry->apic == apic && entry->pin == pin)
573 entry->next = get_one_free_irq_2_pin();
577 printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
581 * Reroute an IRQ to a different pin.
583 static void __init replace_pin_at_irq(unsigned int irq,
584 int oldapic, int oldpin,
585 int newapic, int newpin)
587 struct irq_cfg *cfg = irq_cfg(irq);
588 struct irq_pin_list *entry = cfg->irq_2_pin;
592 if (entry->apic == oldapic && entry->pin == oldpin) {
593 entry->apic = newapic;
596 /* every one is different, right? */
602 /* old apic/pin not found above -- why was replace called before add? */
604 add_pin_to_irq(irq, newapic, newpin);
608 #define DO_ACTION(name,R,ACTION, FINAL) \
610 static void name##_IO_APIC_irq (unsigned int irq) \
611 __DO_ACTION(R, ACTION, FINAL)
614 DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))
617 DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, )
619 static void mask_IO_APIC_irq (unsigned int irq)
623 spin_lock_irqsave(&ioapic_lock, flags);
624 __mask_IO_APIC_irq(irq);
625 spin_unlock_irqrestore(&ioapic_lock, flags);
628 static void unmask_IO_APIC_irq (unsigned int irq)
632 spin_lock_irqsave(&ioapic_lock, flags);
633 __unmask_IO_APIC_irq(irq);
634 spin_unlock_irqrestore(&ioapic_lock, flags);
637 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
639 struct IO_APIC_route_entry entry;
641 /* Check delivery_mode to be sure we're not clearing an SMI pin */
642 entry = ioapic_read_entry(apic, pin);
643 if (entry.delivery_mode == dest_SMI)
646 * Disable it in the IO-APIC irq-routing table:
648 ioapic_mask_entry(apic, pin);
651 static void clear_IO_APIC (void)
655 for (apic = 0; apic < nr_ioapics; apic++)
656 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
657 clear_IO_APIC_pin(apic, pin);
661 * Saves and masks all the unmasked IO-APIC RTEs
663 int save_mask_IO_APIC_setup(void)
665 union IO_APIC_reg_01 reg_01;
670 * The number of IO-APIC IRQ registers (== #pins):
672 for (apic = 0; apic < nr_ioapics; apic++) {
673 spin_lock_irqsave(&ioapic_lock, flags);
674 reg_01.raw = io_apic_read(apic, 1);
675 spin_unlock_irqrestore(&ioapic_lock, flags);
676 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
679 for (apic = 0; apic < nr_ioapics; apic++) {
680 early_ioapic_entries[apic] =
681 kzalloc(sizeof(struct IO_APIC_route_entry) *
682 nr_ioapic_registers[apic], GFP_KERNEL);
683 if (!early_ioapic_entries[apic])
687 for (apic = 0; apic < nr_ioapics; apic++)
688 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
689 struct IO_APIC_route_entry entry;
691 entry = early_ioapic_entries[apic][pin] =
692 ioapic_read_entry(apic, pin);
695 ioapic_write_entry(apic, pin, entry);
701 void restore_IO_APIC_setup(void)
705 for (apic = 0; apic < nr_ioapics; apic++)
706 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
707 ioapic_write_entry(apic, pin,
708 early_ioapic_entries[apic][pin]);
711 void reinit_intr_remapped_IO_APIC(int intr_remapping)
714 * For now, a plain restore of the previous settings.
715 * TBD: when the OS enables interrupt-remapping,
716 * IO-APIC RTEs need to be set up to point to interrupt-remapping
717 * table entries. For now, do a plain restore and wait for
718 * setup_IO_APIC_irqs() to do the proper initialization.
720 restore_IO_APIC_setup();
723 int skip_ioapic_setup;
726 static int __init parse_noapic(char *str)
728 disable_ioapic_setup();
731 early_param("noapic", parse_noapic);
733 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
734 static int __init disable_timer_pin_setup(char *arg)
736 disable_timer_pin_1 = 1;
739 __setup("disable_timer_pin_1", disable_timer_pin_setup);
743 * Find the IRQ entry number of a certain pin.
745 static int find_irq_entry(int apic, int pin, int type)
749 for (i = 0; i < mp_irq_entries; i++)
750 if (mp_irqs[i].mp_irqtype == type &&
751 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
752 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
753 mp_irqs[i].mp_dstirq == pin)
760 * Find the pin to which IRQ[irq] (ISA) is connected
762 static int __init find_isa_irq_pin(int irq, int type)
766 for (i = 0; i < mp_irq_entries; i++) {
767 int lbus = mp_irqs[i].mp_srcbus;
769 if (test_bit(lbus, mp_bus_not_pci) &&
770 (mp_irqs[i].mp_irqtype == type) &&
771 (mp_irqs[i].mp_srcbusirq == irq))
773 return mp_irqs[i].mp_dstirq;
778 static int __init find_isa_irq_apic(int irq, int type)
782 for (i = 0; i < mp_irq_entries; i++) {
783 int lbus = mp_irqs[i].mp_srcbus;
785 if (test_bit(lbus, mp_bus_not_pci) &&
786 (mp_irqs[i].mp_irqtype == type) &&
787 (mp_irqs[i].mp_srcbusirq == irq))
790 if (i < mp_irq_entries) {
792 for(apic = 0; apic < nr_ioapics; apic++) {
793 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
802 * Find a specific PCI IRQ entry.
803 * Not an __init, possibly needed by modules
805 static int pin_2_irq(int idx, int apic, int pin);
807 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
809 int apic, i, best_guess = -1;
811 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
813 if (test_bit(bus, mp_bus_not_pci)) {
814 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
817 for (i = 0; i < mp_irq_entries; i++) {
818 int lbus = mp_irqs[i].mp_srcbus;
820 for (apic = 0; apic < nr_ioapics; apic++)
821 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
822 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
825 if (!test_bit(lbus, mp_bus_not_pci) &&
826 !mp_irqs[i].mp_irqtype &&
828 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
829 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
831 if (!(apic || IO_APIC_IRQ(irq)))
834 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
837 * Use the first all-but-pin matching entry as a
838 * best-guess fuzzy result for broken mptables.
844 BUG_ON(best_guess >= nr_irqs);
848 /* ISA interrupts are always polarity zero edge triggered,
849 * when listed as conforming in the MP table. */
851 #define default_ISA_trigger(idx) (0)
852 #define default_ISA_polarity(idx) (0)
854 /* PCI interrupts are always polarity one level triggered,
855 * when listed as conforming in the MP table. */
857 #define default_PCI_trigger(idx) (1)
858 #define default_PCI_polarity(idx) (1)
860 static int MPBIOS_polarity(int idx)
862 int bus = mp_irqs[idx].mp_srcbus;
866 * Determine IRQ line polarity (high active or low active):
868 switch (mp_irqs[idx].mp_irqflag & 3)
870 case 0: /* conforms, ie. bus-type dependent polarity */
871 if (test_bit(bus, mp_bus_not_pci))
872 polarity = default_ISA_polarity(idx);
874 polarity = default_PCI_polarity(idx);
876 case 1: /* high active */
881 case 2: /* reserved */
883 printk(KERN_WARNING "broken BIOS!!\n");
887 case 3: /* low active */
892 default: /* invalid */
894 printk(KERN_WARNING "broken BIOS!!\n");
902 static int MPBIOS_trigger(int idx)
904 int bus = mp_irqs[idx].mp_srcbus;
908 * Determine IRQ trigger mode (edge or level sensitive):
910 switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
912 case 0: /* conforms, ie. bus-type dependent */
913 if (test_bit(bus, mp_bus_not_pci))
914 trigger = default_ISA_trigger(idx);
916 trigger = default_PCI_trigger(idx);
923 case 2: /* reserved */
925 printk(KERN_WARNING "broken BIOS!!\n");
934 default: /* invalid */
936 printk(KERN_WARNING "broken BIOS!!\n");
944 static inline int irq_polarity(int idx)
946 return MPBIOS_polarity(idx);
949 static inline int irq_trigger(int idx)
951 return MPBIOS_trigger(idx);
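/*
 * Worked example (illustrative): the low two bits of mp_irqflag encode
 * polarity and bits 3:2 encode the trigger mode, so an entry with
 * mp_irqflag == 0x0f decodes to polarity 3 (active low) and trigger 3
 * (level) - the usual PCI setting - while 0x05 decodes to active high,
 * edge triggered.
 */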
954 static int pin_2_irq(int idx, int apic, int pin)
957 int bus = mp_irqs[idx].mp_srcbus;
960 * Debugging check, we are in big trouble if this message pops up!
962 if (mp_irqs[idx].mp_dstirq != pin)
963 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
965 if (test_bit(bus, mp_bus_not_pci)) {
966 irq = mp_irqs[idx].mp_srcbusirq;
969 * PCI IRQs are mapped in order
973 irq += nr_ioapic_registers[i++];
976 BUG_ON(irq >= nr_irqs);
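/*
 * Worked example (illustrative, assuming the first IO-APIC has 24
 * pins): a PCI interrupt on apic 1, pin 4 gets irq = 24 + 4 = 28,
 * i.e. the pins of earlier IO-APICs are skipped and the pin number is
 * added on top.  ISA interrupts simply keep their bus irq number.
 */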
980 void lock_vector_lock(void)
982 /* Used so that the online set of cpus does not change
983 * during assign_irq_vector.
985 spin_lock(&vector_lock);
988 void unlock_vector_lock(void)
990 spin_unlock(&vector_lock);
993 static int __assign_irq_vector(int irq, cpumask_t mask)
996 * NOTE! The local APIC isn't very good at handling
997 * multiple interrupts at the same interrupt level.
998 * As the interrupt level is determined by taking the
999 * vector number and shifting that right by 4, we
1000 * want to spread these out a bit so that they don't
1001 * all fall in the same interrupt level.
1003 * Also, we've got to be careful not to trash gate
1004 * 0x80, because int 0x80 is hm, kind of importantish. ;)
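/*
 * Worked example (illustrative): the priority level is the vector
 * shifted right by 4, so vectors 0x31 and 0x39 both land in level 3
 * while 0x41 lands in level 4.  Stepping the search below in units of
 * 8 vectors (see the offset handling) keeps consecutive allocations
 * spread across levels.
 */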
1006 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1007 unsigned int old_vector;
1009 struct irq_cfg *cfg;
1011 BUG_ON((unsigned)irq >= nr_irqs);
1014 /* Only try and allocate irqs on cpus that are present */
1015 cpus_and(mask, mask, cpu_online_map);
1017 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1020 old_vector = cfg->vector;
1023 cpus_and(tmp, cfg->domain, mask);
1024 if (!cpus_empty(tmp))
1028 for_each_cpu_mask_nr(cpu, mask) {
1029 cpumask_t domain, new_mask;
1033 domain = vector_allocation_domain(cpu);
1034 cpus_and(new_mask, domain, cpu_online_map);
1036 vector = current_vector;
1037 offset = current_offset;
1040 if (vector >= first_system_vector) {
1041 /* If we run out of vectors on large boxen, must share them. */
1042 offset = (offset + 1) % 8;
1043 vector = FIRST_DEVICE_VECTOR + offset;
1045 if (unlikely(current_vector == vector))
1047 if (vector == IA32_SYSCALL_VECTOR)
1049 for_each_cpu_mask_nr(new_cpu, new_mask)
1050 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1053 current_vector = vector;
1054 current_offset = offset;
1056 cfg->move_in_progress = 1;
1057 cfg->old_domain = cfg->domain;
1059 for_each_cpu_mask_nr(new_cpu, new_mask)
1060 per_cpu(vector_irq, new_cpu)[vector] = irq;
1061 cfg->vector = vector;
1062 cfg->domain = domain;
1068 static int assign_irq_vector(int irq, cpumask_t mask)
1071 unsigned long flags;
1073 spin_lock_irqsave(&vector_lock, flags);
1074 err = __assign_irq_vector(irq, mask);
1075 spin_unlock_irqrestore(&vector_lock, flags);
1079 static void __clear_irq_vector(int irq)
1081 struct irq_cfg *cfg;
1085 BUG_ON((unsigned)irq >= nr_irqs);
1087 BUG_ON(!cfg->vector);
1089 vector = cfg->vector;
1090 cpus_and(mask, cfg->domain, cpu_online_map);
1091 for_each_cpu_mask_nr(cpu, mask)
1092 per_cpu(vector_irq, cpu)[vector] = -1;
1095 cpus_clear(cfg->domain);
1098 void __setup_vector_irq(int cpu)
1100 /* Initialize vector_irq on a new cpu */
1101 /* This function must be called with vector_lock held */
1103 struct irq_cfg *cfg;
1105 /* Mark the inuse vectors */
1106 for_each_irq_cfg(cfg) {
1107 if (!cpu_isset(cpu, cfg->domain))
1109 vector = cfg->vector;
1111 per_cpu(vector_irq, cpu)[vector] = irq;
1113 /* Mark the free vectors */
1114 for (vector = 0; vector < NR_VECTORS; ++vector) {
1115 irq = per_cpu(vector_irq, cpu)[vector];
1120 if (!cpu_isset(cpu, cfg->domain))
1121 per_cpu(vector_irq, cpu)[vector] = -1;
1125 static struct irq_chip ioapic_chip;
1126 #ifdef CONFIG_INTR_REMAP
1127 static struct irq_chip ir_ioapic_chip;
1130 static void ioapic_register_intr(int irq, unsigned long trigger)
1132 struct irq_desc *desc;
1134 desc = irq_to_desc(irq);
1136 desc->status |= IRQ_LEVEL;
1138 desc->status &= ~IRQ_LEVEL;
1140 #ifdef CONFIG_INTR_REMAP
1141 if (irq_remapped(irq)) {
1142 desc->status |= IRQ_MOVE_PCNTXT;
1144 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1148 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1149 handle_edge_irq, "edge");
1154 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1158 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1159 handle_edge_irq, "edge");
1162 static int setup_ioapic_entry(int apic, int irq,
1163 struct IO_APIC_route_entry *entry,
1164 unsigned int destination, int trigger,
1165 int polarity, int vector)
1168 * add it to the IO-APIC irq-routing table:
1170 memset(entry,0,sizeof(*entry));
1172 #ifdef CONFIG_INTR_REMAP
1173 if (intr_remapping_enabled) {
1174 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1176 struct IR_IO_APIC_route_entry *ir_entry =
1177 (struct IR_IO_APIC_route_entry *) entry;
1181 panic("No mapping iommu for ioapic %d\n", apic);
1183 index = alloc_irte(iommu, irq, 1);
1185 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1187 memset(&irte, 0, sizeof(irte));
1190 irte.dst_mode = INT_DEST_MODE;
1191 irte.trigger_mode = trigger;
1192 irte.dlvry_mode = INT_DELIVERY_MODE;
1193 irte.vector = vector;
1194 irte.dest_id = IRTE_DEST(destination);
1196 modify_irte(irq, &irte);
1198 ir_entry->index2 = (index >> 15) & 0x1;
1200 ir_entry->format = 1;
1201 ir_entry->index = (index & 0x7fff);
1205 entry->delivery_mode = INT_DELIVERY_MODE;
1206 entry->dest_mode = INT_DEST_MODE;
1207 entry->dest = destination;
1210 entry->mask = 0; /* enable IRQ */
1211 entry->trigger = trigger;
1212 entry->polarity = polarity;
1213 entry->vector = vector;
1215 /* Mask level triggered irqs.
1216 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1223 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
1224 int trigger, int polarity)
1226 struct irq_cfg *cfg;
1227 struct IO_APIC_route_entry entry;
1230 if (!IO_APIC_IRQ(irq))
1236 if (assign_irq_vector(irq, mask))
1239 cpus_and(mask, cfg->domain, mask);
1241 apic_printk(APIC_VERBOSE,KERN_DEBUG
1242 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1243 "IRQ %d Mode:%i Active:%i)\n",
1244 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1245 irq, trigger, polarity);
1248 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1249 cpu_mask_to_apicid(mask), trigger, polarity,
1251 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1252 mp_ioapics[apic].mp_apicid, pin);
1253 __clear_irq_vector(irq);
1257 ioapic_register_intr(irq, trigger);
1259 disable_8259A_irq(irq);
1261 ioapic_write_entry(apic, pin, entry);
1264 static void __init setup_IO_APIC_irqs(void)
1266 int apic, pin, idx, irq, first_notcon = 1;
1268 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1270 for (apic = 0; apic < nr_ioapics; apic++) {
1271 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1273 idx = find_irq_entry(apic,pin,mp_INT);
1276 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
1279 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
1282 if (!first_notcon) {
1283 apic_printk(APIC_VERBOSE, " not connected.\n");
1287 irq = pin_2_irq(idx, apic, pin);
1288 add_pin_to_irq(irq, apic, pin);
1290 setup_IO_APIC_irq(apic, pin, irq,
1291 irq_trigger(idx), irq_polarity(idx));
1296 apic_printk(APIC_VERBOSE, " not connected.\n");
1300 * Set up the timer pin, possibly with the 8259A master behind it.
1302 static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1305 struct IO_APIC_route_entry entry;
1307 if (intr_remapping_enabled)
1310 memset(&entry, 0, sizeof(entry));
1313 * We use logical delivery to get the timer IRQ
1316 entry.dest_mode = INT_DEST_MODE;
1317 entry.mask = 1; /* mask IRQ now */
1318 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1319 entry.delivery_mode = INT_DELIVERY_MODE;
1322 entry.vector = vector;
1325 * The timer IRQ doesn't have to know that behind the
1326 * scenes we may have an 8259A master in AEOI mode ...
1328 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1331 * Add it to the IO-APIC irq-routing table:
1333 ioapic_write_entry(apic, pin, entry);
1337 __apicdebuginit(void) print_IO_APIC(void)
1340 union IO_APIC_reg_00 reg_00;
1341 union IO_APIC_reg_01 reg_01;
1342 union IO_APIC_reg_02 reg_02;
1343 unsigned long flags;
1344 struct irq_cfg *cfg;
1346 if (apic_verbosity == APIC_QUIET)
1349 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1350 for (i = 0; i < nr_ioapics; i++)
1351 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1352 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1355 * We are a bit conservative about what we expect. We have to
1356 * know about every hardware change ASAP.
1358 printk(KERN_INFO "testing the IO APIC.......................\n");
1360 for (apic = 0; apic < nr_ioapics; apic++) {
1362 spin_lock_irqsave(&ioapic_lock, flags);
1363 reg_00.raw = io_apic_read(apic, 0);
1364 reg_01.raw = io_apic_read(apic, 1);
1365 if (reg_01.bits.version >= 0x10)
1366 reg_02.raw = io_apic_read(apic, 2);
1367 spin_unlock_irqrestore(&ioapic_lock, flags);
1370 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1371 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1372 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1374 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1375 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1377 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1378 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1380 if (reg_01.bits.version >= 0x10) {
1381 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1382 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1385 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1387 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1388 " Stat Dmod Deli Vect: \n");
1390 for (i = 0; i <= reg_01.bits.entries; i++) {
1391 struct IO_APIC_route_entry entry;
1393 entry = ioapic_read_entry(apic, i);
1395 printk(KERN_DEBUG " %02x %03X ",
1400 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1405 entry.delivery_status,
1407 entry.delivery_mode,
1412 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1413 for_each_irq_cfg(cfg) {
1414 struct irq_pin_list *entry = cfg->irq_2_pin;
1417 printk(KERN_DEBUG "IRQ%d ", cfg->irq);
1419 printk("-> %d:%d", entry->apic, entry->pin);
1422 entry = entry->next;
1427 printk(KERN_INFO ".................................... done.\n");
1432 __apicdebuginit(void) print_APIC_bitfield(int base)
1437 if (apic_verbosity == APIC_QUIET)
1440 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1441 for (i = 0; i < 8; i++) {
1442 v = apic_read(base + i*0x10);
1443 for (j = 0; j < 32; j++) {
1453 __apicdebuginit(void) print_local_APIC(void *dummy)
1455 unsigned int v, ver, maxlvt;
1458 if (apic_verbosity == APIC_QUIET)
1461 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1462 smp_processor_id(), hard_smp_processor_id());
1463 v = apic_read(APIC_ID);
1464 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1465 v = apic_read(APIC_LVR);
1466 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1467 ver = GET_APIC_VERSION(v);
1468 maxlvt = lapic_get_maxlvt();
1470 v = apic_read(APIC_TASKPRI);
1471 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1473 v = apic_read(APIC_ARBPRI);
1474 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1475 v & APIC_ARBPRI_MASK);
1476 v = apic_read(APIC_PROCPRI);
1477 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1479 v = apic_read(APIC_EOI);
1480 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1481 v = apic_read(APIC_RRR);
1482 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1483 v = apic_read(APIC_LDR);
1484 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1485 v = apic_read(APIC_DFR);
1486 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1487 v = apic_read(APIC_SPIV);
1488 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1490 printk(KERN_DEBUG "... APIC ISR field:\n");
1491 print_APIC_bitfield(APIC_ISR);
1492 printk(KERN_DEBUG "... APIC TMR field:\n");
1493 print_APIC_bitfield(APIC_TMR);
1494 printk(KERN_DEBUG "... APIC IRR field:\n");
1495 print_APIC_bitfield(APIC_IRR);
1497 v = apic_read(APIC_ESR);
1498 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1500 icr = apic_icr_read();
1501 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1502 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1504 v = apic_read(APIC_LVTT);
1505 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1507 if (maxlvt > 3) { /* PC is LVT#4. */
1508 v = apic_read(APIC_LVTPC);
1509 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1511 v = apic_read(APIC_LVT0);
1512 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1513 v = apic_read(APIC_LVT1);
1514 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1516 if (maxlvt > 2) { /* ERR is LVT#3. */
1517 v = apic_read(APIC_LVTERR);
1518 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1521 v = apic_read(APIC_TMICT);
1522 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1523 v = apic_read(APIC_TMCCT);
1524 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1525 v = apic_read(APIC_TDCR);
1526 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1530 __apicdebuginit(void) print_all_local_APICs(void)
1532 on_each_cpu(print_local_APIC, NULL, 1);
1535 __apicdebuginit(void) print_PIC(void)
1538 unsigned long flags;
1540 if (apic_verbosity == APIC_QUIET)
1543 printk(KERN_DEBUG "\nprinting PIC contents\n");
1545 spin_lock_irqsave(&i8259A_lock, flags);
1547 v = inb(0xa1) << 8 | inb(0x21);
1548 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1550 v = inb(0xa0) << 8 | inb(0x20);
1551 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1555 v = inb(0xa0) << 8 | inb(0x20);
1559 spin_unlock_irqrestore(&i8259A_lock, flags);
1561 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1563 v = inb(0x4d1) << 8 | inb(0x4d0);
1564 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1567 __apicdebuginit(int) print_all_ICs(void)
1570 print_all_local_APICs();
1576 fs_initcall(print_all_ICs);
1579 void __init enable_IO_APIC(void)
1581 union IO_APIC_reg_01 reg_01;
1582 int i8259_apic, i8259_pin;
1584 unsigned long flags;
1587 * The number of IO-APIC IRQ registers (== #pins):
1589 for (apic = 0; apic < nr_ioapics; apic++) {
1590 spin_lock_irqsave(&ioapic_lock, flags);
1591 reg_01.raw = io_apic_read(apic, 1);
1592 spin_unlock_irqrestore(&ioapic_lock, flags);
1593 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1595 for(apic = 0; apic < nr_ioapics; apic++) {
1597 /* See if any of the pins is in ExtINT mode */
1598 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1599 struct IO_APIC_route_entry entry;
1600 entry = ioapic_read_entry(apic, pin);
1602 /* If the interrupt line is enabled and in ExtInt mode
1603 * I have found the pin where the i8259 is connected.
1605 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1606 ioapic_i8259.apic = apic;
1607 ioapic_i8259.pin = pin;
1613 /* Look to see if the MP table has reported the ExtINT */
1614 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1615 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1616 /* Trust the MP table if nothing is set up in the hardware */
1617 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1618 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1619 ioapic_i8259.pin = i8259_pin;
1620 ioapic_i8259.apic = i8259_apic;
1622 /* Complain if the MP table and the hardware disagree */
1623 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1624 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1626 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1630 * Do not trust the IO-APIC being empty at bootup
1636 * Not an __init, needed by the reboot code
1638 void disable_IO_APIC(void)
1641 * Clear the IO-APIC before rebooting:
1646 * If the i8259 is routed through an IOAPIC
1647 * Put that IOAPIC in virtual wire mode
1648 * so legacy interrupts can be delivered.
1650 if (ioapic_i8259.pin != -1) {
1651 struct IO_APIC_route_entry entry;
1653 memset(&entry, 0, sizeof(entry));
1654 entry.mask = 0; /* Enabled */
1655 entry.trigger = 0; /* Edge */
1657 entry.polarity = 0; /* High */
1658 entry.delivery_status = 0;
1659 entry.dest_mode = 0; /* Physical */
1660 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1662 entry.dest = read_apic_id();
1665 * Add it to the IO-APIC irq-routing table:
1667 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1670 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1674 * There is a nasty bug in some older SMP boards, their mptable lies
1675 * about the timer IRQ. We do the following to work around the situation:
1677 * - timer IRQ defaults to IO-APIC IRQ
1678 * - if this function detects that timer IRQs are defunct, then we fall
1679 * back to ISA timer IRQs
1681 static int __init timer_irq_works(void)
1683 unsigned long t1 = jiffies;
1684 unsigned long flags;
1686 local_save_flags(flags);
1688 /* Let ten ticks pass... */
1689 mdelay((10 * 1000) / HZ);
1690 local_irq_restore(flags);
1693 * Expect a few ticks at least, to be sure some possible
1694 * glue logic does not lock up after the first one or two
1695 * ticks in a non-ExtINT mode. Also the local APIC
1696 * might have cached one ExtINT interrupt. Finally, at
1697 * least one tick may be lost due to delays.
1701 if (time_after(jiffies, t1 + 4))
1707 * In the SMP+IOAPIC case it might happen that there are an unspecified
1708 * number of pending IRQ events unhandled. These cases are very rare,
1709 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1710 * better to do it this way as thus we do not have to be aware of
1711 * 'pending' interrupts in the IRQ path, except at this point.
1714 * Edge triggered needs to resend any interrupt
1715 * that was delayed but this is now handled in the device
1720 * Starting up an edge-triggered IO-APIC interrupt is
1721 * nasty - we need to make sure that we get the edge.
1722 * If it is already asserted for some reason, we need to
1723 * return 1 to indicate that it was pending.
1725 * This is not complete - we should be able to fake
1726 * an edge even if it isn't on the 8259A...
1729 static unsigned int startup_ioapic_irq(unsigned int irq)
1731 int was_pending = 0;
1732 unsigned long flags;
1734 spin_lock_irqsave(&ioapic_lock, flags);
1736 disable_8259A_irq(irq);
1737 if (i8259A_irq_pending(irq))
1740 __unmask_IO_APIC_irq(irq);
1741 spin_unlock_irqrestore(&ioapic_lock, flags);
1746 static int ioapic_retrigger_irq(unsigned int irq)
1748 struct irq_cfg *cfg = irq_cfg(irq);
1749 unsigned long flags;
1751 spin_lock_irqsave(&vector_lock, flags);
1752 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
1753 spin_unlock_irqrestore(&vector_lock, flags);
1759 * Level and edge triggered IO-APIC interrupts need different handling,
1760 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1761 * handled with the level-triggered descriptor, but that one has slightly
1762 * more overhead. Level-triggered interrupts cannot be handled with the
1763 * edge-triggered handler, without risking IRQ storms and other ugly
1769 #ifdef CONFIG_INTR_REMAP
1770 static void ir_irq_migration(struct work_struct *work);
1772 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
1775 * Migrate the IO-APIC irq in the presence of intr-remapping.
1777 * For edge triggered, irq migration is a simple atomic update(of vector
1778 * and cpu destination) of IRTE and flush the hardware cache.
1780 * For level triggered, we need to modify the io-apic RTE as well with the updated
1781 * vector information, along with modifying the IRTE with the vector and destination.
1782 * So irq migration for level triggered is a little bit more complex compared to
1783 * edge triggered migration. But the good news is, we use the same algorithm
1784 * for level triggered migration as we have today, the only difference being that
1785 * we now initiate the irq migration from process context instead of the
1786 * interrupt context.
1788 * In the future, when we do a directed EOI (combined with cpu EOI broadcast
1789 * suppression) to the IO-APIC, level triggered irq migration will also be
1790 * as simple as edge triggered migration and we can do the irq migration
1791 * with a simple atomic update to IO-APIC RTE.
1793 static void migrate_ioapic_irq(int irq, cpumask_t mask)
1795 struct irq_cfg *cfg;
1796 struct irq_desc *desc;
1797 cpumask_t tmp, cleanup_mask;
1799 int modify_ioapic_rte;
1801 unsigned long flags;
1803 cpus_and(tmp, mask, cpu_online_map);
1804 if (cpus_empty(tmp))
1807 if (get_irte(irq, &irte))
1810 if (assign_irq_vector(irq, mask))
1814 cpus_and(tmp, cfg->domain, mask);
1815 dest = cpu_mask_to_apicid(tmp);
1817 desc = irq_to_desc(irq);
1818 modify_ioapic_rte = desc->status & IRQ_LEVEL;
1819 if (modify_ioapic_rte) {
1820 spin_lock_irqsave(&ioapic_lock, flags);
1821 __target_IO_APIC_irq(irq, dest, cfg->vector);
1822 spin_unlock_irqrestore(&ioapic_lock, flags);
1825 irte.vector = cfg->vector;
1826 irte.dest_id = IRTE_DEST(dest);
1829 * Modify the IRTE and flush the interrupt entry cache.
1831 modify_irte(irq, &irte);
1833 if (cfg->move_in_progress) {
1834 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
1835 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
1836 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
1837 cfg->move_in_progress = 0;
1840 desc->affinity = mask;
1843 static int migrate_irq_remapped_level(int irq)
1846 struct irq_desc *desc = irq_to_desc(irq);
1848 mask_IO_APIC_irq(irq);
1850 if (io_apic_level_ack_pending(irq)) {
1852 * Interrupt in progress. Migrating irq now will change the
1853 * vector information in the IO-APIC RTE and that will confuse
1854 * the EOI broadcast performed by the cpu.
1855 * So, delay the irq migration to the next attempt.
1857 schedule_delayed_work(&ir_migration_work, 1);
1861 /* everything is clear. we have right of way */
1862 migrate_ioapic_irq(irq, desc->pending_mask);
1865 desc->status &= ~IRQ_MOVE_PENDING;
1866 cpus_clear(desc->pending_mask);
1869 unmask_IO_APIC_irq(irq);
1873 static void ir_irq_migration(struct work_struct *work)
1876 struct irq_desc *desc;
1878 for_each_irq_desc(irq, desc) {
1879 if (desc->status & IRQ_MOVE_PENDING) {
1880 unsigned long flags;
1882 spin_lock_irqsave(&desc->lock, flags);
1883 if (!desc->chip->set_affinity ||
1884 !(desc->status & IRQ_MOVE_PENDING)) {
1885 desc->status &= ~IRQ_MOVE_PENDING;
1886 spin_unlock_irqrestore(&desc->lock, flags);
1890 desc->chip->set_affinity(irq, desc->pending_mask);
1891 spin_unlock_irqrestore(&desc->lock, flags);
1897 * Migrates the IRQ destination in the process context.
1899 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
1901 struct irq_desc *desc = irq_to_desc(irq);
1903 if (desc->status & IRQ_LEVEL) {
1904 desc->status |= IRQ_MOVE_PENDING;
1905 desc->pending_mask = mask;
1906 migrate_irq_remapped_level(irq);
1910 migrate_ioapic_irq(irq, mask);
1914 asmlinkage void smp_irq_move_cleanup_interrupt(void)
1916 unsigned vector, me;
1921 me = smp_processor_id();
1922 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
1924 struct irq_desc *desc;
1925 struct irq_cfg *cfg;
1926 irq = __get_cpu_var(vector_irq)[vector];
1930 desc = irq_to_desc(irq);
1932 spin_lock(&desc->lock);
1933 if (!cfg->move_cleanup_count)
1936 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
1939 __get_cpu_var(vector_irq)[vector] = -1;
1940 cfg->move_cleanup_count--;
1942 spin_unlock(&desc->lock);
1948 static void irq_complete_move(unsigned int irq)
1950 struct irq_cfg *cfg = irq_cfg(irq);
1951 unsigned vector, me;
1953 if (likely(!cfg->move_in_progress))
1956 vector = ~get_irq_regs()->orig_ax;
1957 me = smp_processor_id();
1958 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
1959 cpumask_t cleanup_mask;
1961 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
1962 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
1963 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
1964 cfg->move_in_progress = 0;
1968 static inline void irq_complete_move(unsigned int irq) {}
1970 #ifdef CONFIG_INTR_REMAP
1971 static void ack_x2apic_level(unsigned int irq)
1976 static void ack_x2apic_edge(unsigned int irq)
1982 static void ack_apic_edge(unsigned int irq)
1984 irq_complete_move(irq);
1985 move_native_irq(irq);
1989 static void ack_apic_level(unsigned int irq)
1991 int do_unmask_irq = 0;
1993 irq_complete_move(irq);
1994 #ifdef CONFIG_GENERIC_PENDING_IRQ
1995 /* If we are moving the irq we need to mask it */
1996 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
1998 mask_IO_APIC_irq(irq);
2003 * We must acknowledge the irq before we move it or the acknowledge will
2004 * not propagate properly.
2008 /* Now we can move and re-enable the irq */
2009 if (unlikely(do_unmask_irq)) {
2010 /* Only migrate the irq if the ack has been received.
2012 * On rare occasions the broadcast level triggered ack gets
2013 * delayed going to ioapics, and if we reprogram the
2014 * vector while Remote IRR is still set the irq will never
2017 * To prevent this scenario we read the Remote IRR bit
2018 * of the ioapic. This has two effects.
2019 * - On any sane system the read of the ioapic will
2020 * flush writes (and acks) going to the ioapic from
2022 * - We get to see if the ACK has actually been delivered.
2024 * Based on failed experiments of reprogramming the
2025 * ioapic entry from outside of irq context starting
2026 * with masking the ioapic entry and then polling until
2027 * Remote IRR was clear before reprogramming the
2028 * ioapic I don't trust the Remote IRR bit to be
2029 * completely accurate.
2031 * However there appears to be no other way to plug
2032 * this race, so if the Remote IRR bit is not
2033 * accurate and is causing problems then it is a hardware bug
2034 * and you can go talk to the chipset vendor about it.
2036 if (!io_apic_level_ack_pending(irq))
2037 move_masked_irq(irq);
2038 unmask_IO_APIC_irq(irq);
2042 static struct irq_chip ioapic_chip __read_mostly = {
2044 .startup = startup_ioapic_irq,
2045 .mask = mask_IO_APIC_irq,
2046 .unmask = unmask_IO_APIC_irq,
2047 .ack = ack_apic_edge,
2048 .eoi = ack_apic_level,
2050 .set_affinity = set_ioapic_affinity_irq,
2052 .retrigger = ioapic_retrigger_irq,
2055 #ifdef CONFIG_INTR_REMAP
2056 static struct irq_chip ir_ioapic_chip __read_mostly = {
2057 .name = "IR-IO-APIC",
2058 .startup = startup_ioapic_irq,
2059 .mask = mask_IO_APIC_irq,
2060 .unmask = unmask_IO_APIC_irq,
2061 .ack = ack_x2apic_edge,
2062 .eoi = ack_x2apic_level,
2064 .set_affinity = set_ir_ioapic_affinity_irq,
2066 .retrigger = ioapic_retrigger_irq,
2070 static inline void init_IO_APIC_traps(void)
2073 struct irq_desc *desc;
2074 struct irq_cfg *cfg;
2077 * NOTE! The local APIC isn't very good at handling
2078 * multiple interrupts at the same interrupt level.
2079 * As the interrupt level is determined by taking the
2080 * vector number and shifting that right by 4, we
2081 * want to spread these out a bit so that they don't
2082 * all fall in the same interrupt level.
2084 * Also, we've got to be careful not to trash gate
2085 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2087 for_each_irq_cfg(cfg) {
2089 if (IO_APIC_IRQ(irq) && !cfg->vector) {
2091 * Hmm.. We don't have an entry for this,
2092 * so default to an old-fashioned 8259
2093 * interrupt if we can..
2096 make_8259A_irq(irq);
2098 desc = irq_to_desc(irq);
2099 /* Strange. Oh, well.. */
2100 desc->chip = &no_irq_chip;
2106 static void unmask_lapic_irq(unsigned int irq)
2110 v = apic_read(APIC_LVT0);
2111 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2114 static void mask_lapic_irq(unsigned int irq)
2118 v = apic_read(APIC_LVT0);
2119 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2122 static void ack_lapic_irq (unsigned int irq)
2127 static struct irq_chip lapic_chip __read_mostly = {
2128 .name = "local-APIC",
2129 .mask = mask_lapic_irq,
2130 .unmask = unmask_lapic_irq,
2131 .ack = ack_lapic_irq,
2134 static void lapic_register_intr(int irq)
2136 struct irq_desc *desc;
2138 desc = irq_to_desc(irq);
2139 desc->status &= ~IRQ_LEVEL;
2140 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2144 static void __init setup_nmi(void)
2147 * Dirty trick to enable the NMI watchdog ...
2148 * We put the 8259A master into AEOI mode and
2149 * unmask LVT0 as NMI on all local APICs.
2151 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2152 * is from Maciej W. Rozycki - so we do not have to EOI from
2153 * the NMI handler or the timer interrupt.
2155 printk(KERN_INFO "activating NMI Watchdog ...");
2157 enable_NMI_through_LVT0();
2163 * This looks a bit hackish but it's about the only way of sending
2164 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2165 * not support the ExtINT mode, unfortunately. We need to send these
2166 * cycles as some i82489DX-based boards have glue logic that keeps the
2167 * 8259A interrupt line asserted until INTA. --macro
2169 static inline void __init unlock_ExtINT_logic(void)
2172 struct IO_APIC_route_entry entry0, entry1;
2173 unsigned char save_control, save_freq_select;
2175 pin = find_isa_irq_pin(8, mp_INT);
2176 apic = find_isa_irq_apic(8, mp_INT);
2180 entry0 = ioapic_read_entry(apic, pin);
2182 clear_IO_APIC_pin(apic, pin);
2184 memset(&entry1, 0, sizeof(entry1));
2186 entry1.dest_mode = 0; /* physical delivery */
2187 entry1.mask = 0; /* unmask IRQ now */
2188 entry1.dest = hard_smp_processor_id();
2189 entry1.delivery_mode = dest_ExtINT;
2190 entry1.polarity = entry0.polarity;
2194 ioapic_write_entry(apic, pin, entry1);
2196 save_control = CMOS_READ(RTC_CONTROL);
2197 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2198 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2200 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2205 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2209 CMOS_WRITE(save_control, RTC_CONTROL);
2210 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2211 clear_IO_APIC_pin(apic, pin);
2213 ioapic_write_entry(apic, pin, entry0);
2217 * This code may look a bit paranoid, but it's supposed to cooperate with
2218 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2219 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2220 * fanatically on his truly buggy board.
2222 * FIXME: really need to revamp this for modern platforms only.
2224 static inline void __init check_timer(void)
2226 struct irq_cfg *cfg = irq_cfg(0);
2227 int apic1, pin1, apic2, pin2;
2228 unsigned long flags;
2231 local_irq_save(flags);
2234 * get/set the timer IRQ vector:
2236 disable_8259A_irq(0);
2237 assign_irq_vector(0, TARGET_CPUS);
2240 * As IRQ0 is to be enabled in the 8259A, the virtual
2241 * wire has to be disabled in the local APIC.
2243 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2246 pin1 = find_isa_irq_pin(0, mp_INT);
2247 apic1 = find_isa_irq_apic(0, mp_INT);
2248 pin2 = ioapic_i8259.pin;
2249 apic2 = ioapic_i8259.apic;
2251 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2252 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2253 cfg->vector, apic1, pin1, apic2, pin2);
2256 * Some BIOS writers are clueless and report the ExtINTA
2257 * I/O APIC input from the cascaded 8259A as the timer
2258 * interrupt input. So just in case, if only one pin
2259 * was found above, try it both directly and through the 8259A.
2263 if (intr_remapping_enabled)
2264 panic("BIOS bug: timer not connected to IO-APIC");
2268 } else if (pin2 == -1) {
2275 * Ok, does IRQ0 through the IOAPIC work?
2278 add_pin_to_irq(0, apic1, pin1);
2279 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2281 unmask_IO_APIC_irq(0);
2282 if (!no_timer_check && timer_irq_works()) {
2283 if (nmi_watchdog == NMI_IO_APIC) {
2285 enable_8259A_irq(0);
2287 if (disable_timer_pin_1 > 0)
2288 clear_IO_APIC_pin(0, pin1);
2291 if (intr_remapping_enabled)
2292 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2293 clear_IO_APIC_pin(apic1, pin1);
2295 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2296 "8254 timer not connected to IO-APIC\n");
2298 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2299 "(IRQ0) through the 8259A ...\n");
2300 apic_printk(APIC_QUIET, KERN_INFO
2301 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2303 * legacy devices should be connected to IO APIC #0
2305 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2306 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2307 unmask_IO_APIC_irq(0);
2308 enable_8259A_irq(0);
2309 if (timer_irq_works()) {
2310 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2311 timer_through_8259 = 1;
2312 if (nmi_watchdog == NMI_IO_APIC) {
2313 disable_8259A_irq(0);
2315 enable_8259A_irq(0);
2320 * Cleanup, just in case ...
2322 disable_8259A_irq(0);
2323 clear_IO_APIC_pin(apic2, pin2);
2324 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2327 if (nmi_watchdog == NMI_IO_APIC) {
2328 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2329 "through the IO-APIC - disabling NMI Watchdog!\n");
2330 nmi_watchdog = NMI_NONE;
2333 apic_printk(APIC_QUIET, KERN_INFO
2334 "...trying to set up timer as Virtual Wire IRQ...\n");
2336 lapic_register_intr(0);
2337 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2338 enable_8259A_irq(0);
2340 if (timer_irq_works()) {
2341 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2344 disable_8259A_irq(0);
2345 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2346 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2348 apic_printk(APIC_QUIET, KERN_INFO
2349 "...trying to set up timer as ExtINT IRQ...\n");
2353 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2355 unlock_ExtINT_logic();
2357 if (timer_irq_works()) {
2358 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2361 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2362 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2363 "report. Then try booting with the 'noapic' option.\n");
2365 local_irq_restore(flags);
2368 static int __init notimercheck(char *s)
2373 __setup("no_timer_check", notimercheck);
2376 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2377 * to devices. However there may be an I/O APIC pin available for
2378 * this interrupt regardless. The pin may be left unconnected, but
2379 * typically it will be reused as an ExtINT cascade interrupt for
2380 * the master 8259A. In the MPS case such a pin will normally be
2381 * reported as an ExtINT interrupt in the MP table. With ACPI
2382 * there is no provision for ExtINT interrupts, and in the absence
2383 * of an override it would be treated as an ordinary ISA I/O APIC
2384 * interrupt, that is edge-triggered and unmasked by default. We
2385 * used to do this, but it caused problems on some systems because
2386 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2387 * the same ExtINT cascade interrupt to drive the local APIC of the
2388 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2389 * the I/O APIC in all cases now. No actual device should request
2390 * it anyway. --macro
2392 #define PIC_IRQS (1<<2)
2394 void __init setup_IO_APIC(void)
2398 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
2401 io_apic_irqs = ~PIC_IRQS;
2403 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2406 setup_IO_APIC_irqs();
2407 init_IO_APIC_traps();
2411 struct sysfs_ioapic_data {
2412 struct sys_device dev;
2413 struct IO_APIC_route_entry entry[0];
2415 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
2417 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2419 struct IO_APIC_route_entry *entry;
2420 struct sysfs_ioapic_data *data;
2423 data = container_of(dev, struct sysfs_ioapic_data, dev);
2424 entry = data->entry;
2425 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
2426 *entry = ioapic_read_entry(dev->id, i);
2431 static int ioapic_resume(struct sys_device *dev)
2433 struct IO_APIC_route_entry *entry;
2434 struct sysfs_ioapic_data *data;
2435 unsigned long flags;
2436 union IO_APIC_reg_00 reg_00;
2439 data = container_of(dev, struct sysfs_ioapic_data, dev);
2440 entry = data->entry;
2442 spin_lock_irqsave(&ioapic_lock, flags);
2443 reg_00.raw = io_apic_read(dev->id, 0);
2444 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2445 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
2446 io_apic_write(dev->id, 0, reg_00.raw);
2448 spin_unlock_irqrestore(&ioapic_lock, flags);
2449 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2450 ioapic_write_entry(dev->id, i, entry[i]);
2455 static struct sysdev_class ioapic_sysdev_class = {
2457 .suspend = ioapic_suspend,
2458 .resume = ioapic_resume,
2461 static int __init ioapic_init_sysfs(void)
2463 struct sys_device * dev;
2466 error = sysdev_class_register(&ioapic_sysdev_class);
2470 for (i = 0; i < nr_ioapics; i++ ) {
2471 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2472 * sizeof(struct IO_APIC_route_entry);
2473 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
2474 if (!mp_ioapic_data[i]) {
2475 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2478 dev = &mp_ioapic_data[i]->dev;
2480 dev->cls = &ioapic_sysdev_class;
2481 error = sysdev_register(dev);
2483 kfree(mp_ioapic_data[i]);
2484 mp_ioapic_data[i] = NULL;
2485 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2493 device_initcall(ioapic_init_sysfs);
2496 * Dynamic irq allocation and deallocation
2498 int create_irq(void)
2500 /* Allocate an unused irq */
2503 unsigned long flags;
2504 struct irq_cfg *cfg_new;
2507 spin_lock_irqsave(&vector_lock, flags);
2508 for (new = (nr_irqs - 1); new >= 0; new--) {
2509 if (platform_legacy_irq(new))
2511 cfg_new = irq_cfg(new);
2512 if (cfg_new && cfg_new->vector != 0)
2514 /* check if we need to create one */
2516 cfg_new = irq_cfg_alloc(new);
2517 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
2521 spin_unlock_irqrestore(&vector_lock, flags);
2524 dynamic_irq_init(irq);
2529 void destroy_irq(unsigned int irq)
2531 unsigned long flags;
2533 dynamic_irq_cleanup(irq);
2535 #ifdef CONFIG_INTR_REMAP
2538 spin_lock_irqsave(&vector_lock, flags);
2539 __clear_irq_vector(irq);
2540 spin_unlock_irqrestore(&vector_lock, flags);
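/*
 * Usage sketch (added, illustrative only): create_irq() and destroy_irq()
 * bracket the lifetime of a dynamically allocated vector.  A hypothetical
 * caller, much like the MSI/HT setup paths below, would look roughly like:
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	set_irq_chip_and_handler_name(irq, &some_chip, handle_edge_irq, "edge");
 *	...
 *	destroy_irq(irq);
 *
 * create_irq() returns -ENOSPC when no vector is free; "some_chip" stands
 * for whatever irq_chip the caller provides (e.g. msi_chip or ht_irq_chip
 * further down in this file).
 */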
2544 * MSI message composition
2546 #ifdef CONFIG_PCI_MSI
2547 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
2549 struct irq_cfg *cfg;
2555 err = assign_irq_vector(irq, tmp);
2560 cpus_and(tmp, cfg->domain, tmp);
2561 dest = cpu_mask_to_apicid(tmp);
2563 #ifdef CONFIG_INTR_REMAP
2564 if (irq_remapped(irq)) {
2569 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
2570 BUG_ON(ir_index == -1);
2572 memset (&irte, 0, sizeof(irte));
2575 irte.dst_mode = INT_DEST_MODE;
2576 irte.trigger_mode = 0; /* edge */
2577 irte.dlvry_mode = INT_DELIVERY_MODE;
2578 irte.vector = cfg->vector;
2579 irte.dest_id = IRTE_DEST(dest);
2581 modify_irte(irq, &irte);
2583 msg->address_hi = MSI_ADDR_BASE_HI;
2584 msg->data = sub_handle;
2585 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
2587 MSI_ADDR_IR_INDEX1(ir_index) |
2588 MSI_ADDR_IR_INDEX2(ir_index);
2592 msg->address_hi = MSI_ADDR_BASE_HI;
2595 ((INT_DEST_MODE == 0) ?
2596 MSI_ADDR_DEST_MODE_PHYSICAL:
2597 MSI_ADDR_DEST_MODE_LOGICAL) |
2598 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2599 MSI_ADDR_REDIRECTION_CPU:
2600 MSI_ADDR_REDIRECTION_LOWPRI) |
2601 MSI_ADDR_DEST_ID(dest);
2604 MSI_DATA_TRIGGER_EDGE |
2605 MSI_DATA_LEVEL_ASSERT |
2606 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2607 MSI_DATA_DELIVERY_FIXED:
2608 MSI_DATA_DELIVERY_LOWPRI) |
2609 MSI_DATA_VECTOR(cfg->vector);
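/*
 * Worked example (added; exact bit values depend on msidef.h): in the
 * non-remapped branch above, a fixed-delivery, physical-mode interrupt
 * with destination APIC ID 1 and vector 0x31 composes roughly to
 *
 *	address_hi = MSI_ADDR_BASE_HI
 *	address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(1)	~ 0xfee01000
 *	data	   = MSI_DATA_LEVEL_ASSERT | MSI_DATA_VECTOR(0x31)
 *
 * i.e. the vector ends up in the low byte of the data register and the
 * destination APIC ID in bits 19:12 of the address, which is exactly
 * what the affinity code below rewrites when the irq is moved.
 */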
2615 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2617 struct irq_cfg *cfg;
2621 struct irq_desc *desc;
2623 cpus_and(tmp, mask, cpu_online_map);
2624 if (cpus_empty(tmp))
2627 if (assign_irq_vector(irq, mask))
2631 cpus_and(tmp, cfg->domain, mask);
2632 dest = cpu_mask_to_apicid(tmp);
2634 read_msi_msg(irq, &msg);
2636 msg.data &= ~MSI_DATA_VECTOR_MASK;
2637 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2638 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2639 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2641 write_msi_msg(irq, &msg);
2642 desc = irq_to_desc(irq);
2643 desc->affinity = mask;
2646 #ifdef CONFIG_INTR_REMAP
2648 * Migrate the MSI irq to another cpumask. This migration is
2649 * done in the process context using interrupt-remapping hardware.
2651 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2653 struct irq_cfg *cfg;
2655 cpumask_t tmp, cleanup_mask;
2657 struct irq_desc *desc;
2659 cpus_and(tmp, mask, cpu_online_map);
2660 if (cpus_empty(tmp))
2663 if (get_irte(irq, &irte))
2666 if (assign_irq_vector(irq, mask))
2670 cpus_and(tmp, cfg->domain, mask);
2671 dest = cpu_mask_to_apicid(tmp);
2673 irte.vector = cfg->vector;
2674 irte.dest_id = IRTE_DEST(dest);
2677 * atomically update the IRTE with the new destination and vector.
2679 modify_irte(irq, &irte);
2682 * After this point, all the interrupts will start arriving
2683 * at the new destination. So, time to cleanup the previous
2684 * vector allocation.
2686 if (cfg->move_in_progress) {
2687 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2688 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2689 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2690 cfg->move_in_progress = 0;
2693 desc = irq_to_desc(irq);
2694 desc->affinity = mask;
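/*
 * Note (added): contrast this with set_msi_irq_affinity() above.  Without
 * interrupt remapping the MSI message stored in the device has to be
 * rewritten; with remapping only the IRTE in the iommu is updated, so the
 * device-visible message never changes.  That is why this variant can be
 * run in process context (see the IRQ_MOVE_PCNTXT flag in setup_msi_irq()).
 */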
2697 #endif /* CONFIG_SMP */
2700 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
2701 * which implement the MSI or MSI-X Capability Structure.
2703 static struct irq_chip msi_chip = {
2705 .unmask = unmask_msi_irq,
2706 .mask = mask_msi_irq,
2707 .ack = ack_apic_edge,
2709 .set_affinity = set_msi_irq_affinity,
2711 .retrigger = ioapic_retrigger_irq,
2714 #ifdef CONFIG_INTR_REMAP
2715 static struct irq_chip msi_ir_chip = {
2716 .name = "IR-PCI-MSI",
2717 .unmask = unmask_msi_irq,
2718 .mask = mask_msi_irq,
2719 .ack = ack_x2apic_edge,
2721 .set_affinity = ir_set_msi_irq_affinity,
2723 .retrigger = ioapic_retrigger_irq,
2727 * Map the PCI dev to the corresponding remapping hardware unit
2728 * and allocate 'nvec' consecutive interrupt-remapping table entries
2731 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
2733 struct intel_iommu *iommu;
2736 iommu = map_dev_to_ir(dev);
2739 "Unable to map PCI %s to iommu\n", pci_name(dev));
2743 index = alloc_irte(iommu, irq, nvec);
2746 "Unable to allocate %d IRTE for PCI %s\n", nvec,
2754 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
2759 ret = msi_compose_msg(dev, irq, &msg);
2763 set_irq_msi(irq, desc);
2764 write_msi_msg(irq, &msg);
2766 #ifdef CONFIG_INTR_REMAP
2767 if (irq_remapped(irq)) {
2768 struct irq_desc *desc = irq_to_desc(irq);
2770 * irq migration in process context
2772 desc->status |= IRQ_MOVE_PCNTXT;
2773 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
2776 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
2781 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
2789 #ifdef CONFIG_INTR_REMAP
2790 if (!intr_remapping_enabled)
2793 ret = msi_alloc_irte(dev, irq, 1);
2798 ret = setup_msi_irq(dev, desc, irq);
2805 #ifdef CONFIG_INTR_REMAP
2812 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
2814 int irq, ret, sub_handle;
2815 struct msi_desc *desc;
2816 #ifdef CONFIG_INTR_REMAP
2817 struct intel_iommu *iommu = NULL;
2822 list_for_each_entry(desc, &dev->msi_list, list) {
2826 #ifdef CONFIG_INTR_REMAP
2827 if (!intr_remapping_enabled)
2832 * allocate the consecutive block of IRTEs
2835 index = msi_alloc_irte(dev, irq, nvec);
2841 iommu = map_dev_to_ir(dev);
2847 * setup the mapping between the irq and the IRTE
2848 * base index, the sub_handle pointing to the
2849 * appropriate interrupt remap table entry.
2851 set_irte_irq(irq, iommu, index, sub_handle);
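/*
 * Example (added note): for an MSI-X device requesting nvec == 4, the
 * first loop iteration allocates one block of four IRTEs at "index";
 * the remaining iterations reuse that block and only advance sub_handle,
 * so the four irqs end up mapped to index + 0 .. index + 3 in the
 * interrupt remap table.
 */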
2855 ret = setup_msi_irq(dev, desc, irq);
2867 void arch_teardown_msi_irq(unsigned int irq)
2874 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
2876 struct irq_cfg *cfg;
2880 struct irq_desc *desc;
2882 cpus_and(tmp, mask, cpu_online_map);
2883 if (cpus_empty(tmp))
2886 if (assign_irq_vector(irq, mask))
2890 cpus_and(tmp, cfg->domain, mask);
2891 dest = cpu_mask_to_apicid(tmp);
2893 dmar_msi_read(irq, &msg);
2895 msg.data &= ~MSI_DATA_VECTOR_MASK;
2896 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2897 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2898 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2900 dmar_msi_write(irq, &msg);
2901 desc = irq_to_desc(irq);
2902 desc->affinity = mask;
2904 #endif /* CONFIG_SMP */
2906 struct irq_chip dmar_msi_type = {
2908 .unmask = dmar_msi_unmask,
2909 .mask = dmar_msi_mask,
2910 .ack = ack_apic_edge,
2912 .set_affinity = dmar_msi_set_affinity,
2914 .retrigger = ioapic_retrigger_irq,
2917 int arch_setup_dmar_msi(unsigned int irq)
2922 ret = msi_compose_msg(NULL, irq, &msg);
2925 dmar_msi_write(irq, &msg);
2926 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
2932 #endif /* CONFIG_PCI_MSI */
2934 * Hypertransport interrupt support
2936 #ifdef CONFIG_HT_IRQ
2940 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
2942 struct ht_irq_msg msg;
2943 fetch_ht_irq_msg(irq, &msg);
2945 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
2946 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
2948 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
2949 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
2951 write_ht_irq_msg(irq, &msg);
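/*
 * Note (added): retargeting an HT interrupt is done purely by rewriting
 * the HT irq message: the vector and the low bits of the destination
 * APIC ID go into address_lo, the high bits of the destination into
 * address_hi.  set_ht_irq_affinity() below just picks the new destination
 * and vector and calls this helper.
 */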
2954 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
2956 struct irq_cfg *cfg;
2959 struct irq_desc *desc;
2961 cpus_and(tmp, mask, cpu_online_map);
2962 if (cpus_empty(tmp))
2965 if (assign_irq_vector(irq, mask))
2969 cpus_and(tmp, cfg->domain, mask);
2970 dest = cpu_mask_to_apicid(tmp);
2972 target_ht_irq(irq, dest, cfg->vector);
2973 desc = irq_to_desc(irq);
2974 desc->affinity = mask;
2978 static struct irq_chip ht_irq_chip = {
2980 .mask = mask_ht_irq,
2981 .unmask = unmask_ht_irq,
2982 .ack = ack_apic_edge,
2984 .set_affinity = set_ht_irq_affinity,
2986 .retrigger = ioapic_retrigger_irq,
2989 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2991 struct irq_cfg *cfg;
2996 err = assign_irq_vector(irq, tmp);
2998 struct ht_irq_msg msg;
3002 cpus_and(tmp, cfg->domain, tmp);
3003 dest = cpu_mask_to_apicid(tmp);
3005 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3009 HT_IRQ_LOW_DEST_ID(dest) |
3010 HT_IRQ_LOW_VECTOR(cfg->vector) |
3011 ((INT_DEST_MODE == 0) ?
3012 HT_IRQ_LOW_DM_PHYSICAL :
3013 HT_IRQ_LOW_DM_LOGICAL) |
3014 HT_IRQ_LOW_RQEOI_EDGE |
3015 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3016 HT_IRQ_LOW_MT_FIXED :
3017 HT_IRQ_LOW_MT_ARBITRATED) |
3018 HT_IRQ_LOW_IRQ_MASKED;
3020 write_ht_irq_msg(irq, &msg);
3022 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3023 handle_edge_irq, "edge");
3027 #endif /* CONFIG_HT_IRQ */
3029 /* --------------------------------------------------------------------------
3030 ACPI-based IOAPIC Configuration
3031 -------------------------------------------------------------------------- */
3035 #define IO_APIC_MAX_ID 0xFE
3037 int __init io_apic_get_redir_entries (int ioapic)
3039 union IO_APIC_reg_01 reg_01;
3040 unsigned long flags;
3042 spin_lock_irqsave(&ioapic_lock, flags);
3043 reg_01.raw = io_apic_read(ioapic, 1);
3044 spin_unlock_irqrestore(&ioapic_lock, flags);
3046 return reg_01.bits.entries;
3050 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
3052 if (!IO_APIC_IRQ(irq)) {
3053 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3059 * IRQs < 16 are already in the irq_2_pin[] map
3062 add_pin_to_irq(irq, ioapic, pin);
3064 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
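/*
 * Illustrative call (added, hypothetical values): the ACPI glue uses this
 * to route a GSI it found in the MADT overrides or a _PRT entry, roughly
 *
 *	io_apic_set_pci_routing(ioapic, pin, gsi, 1, 1);
 *
 * for a level-triggered, active-low PCI INTx line; the pin is then added
 * to irq_2_pin and programmed via setup_IO_APIC_irq() as above.
 */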
3070 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3074 if (skip_ioapic_setup)
3077 for (i = 0; i < mp_irq_entries; i++)
3078 if (mp_irqs[i].mp_irqtype == mp_INT &&
3079 mp_irqs[i].mp_srcbusirq == bus_irq)
3081 if (i >= mp_irq_entries)
3084 *trigger = irq_trigger(i);
3085 *polarity = irq_polarity(i);
3089 #endif /* CONFIG_ACPI */
3092 * This function is currently only a helper for the i386 SMP boot process, where
3093 * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
3094 * so the mask in all cases should simply be TARGET_CPUS.
3097 void __init setup_ioapic_dest(void)
3099 int pin, ioapic, irq, irq_entry;
3100 struct irq_cfg *cfg;
3102 if (skip_ioapic_setup == 1)
3105 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3106 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3107 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3108 if (irq_entry == -1)
3110 irq = pin_2_irq(irq_entry, ioapic, pin);
3112 /* setup_IO_APIC_irqs could fail to get vector for some device
3113 * when you have too many devices, because at that time only boot
3118 setup_IO_APIC_irq(ioapic, pin, irq,
3119 irq_trigger(irq_entry),
3120 irq_polarity(irq_entry));
3121 #ifdef CONFIG_INTR_REMAP
3122 else if (intr_remapping_enabled)
3123 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
3126 set_ioapic_affinity_irq(irq, TARGET_CPUS);
3133 #define IOAPIC_RESOURCE_NAME_SIZE 11
3135 static struct resource *ioapic_resources;
3137 static struct resource * __init ioapic_setup_resources(void)
3140 struct resource *res;
3144 if (nr_ioapics <= 0)
3147 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3150 mem = alloc_bootmem(n);
3154 mem += sizeof(struct resource) * nr_ioapics;
3156 for (i = 0; i < nr_ioapics; i++) {
3158 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3159 sprintf(mem, "IOAPIC %u", i);
3160 mem += IOAPIC_RESOURCE_NAME_SIZE;
3164 ioapic_resources = res;
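/*
 * Layout note (added): ioapic_setup_resources() makes a single bootmem
 * allocation that holds the array of nr_ioapics struct resource entries
 * followed by their "IOAPIC %u" name strings; "mem" is first advanced past
 * the resource array and then walks the name area, so the names need no
 * allocation of their own.
 */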
3169 void __init ioapic_init_mappings(void)
3171 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3172 struct resource *ioapic_res;
3175 ioapic_res = ioapic_setup_resources();
3176 for (i = 0; i < nr_ioapics; i++) {
3177 if (smp_found_config) {
3178 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3180 ioapic_phys = (unsigned long)
3181 alloc_bootmem_pages(PAGE_SIZE);
3182 ioapic_phys = __pa(ioapic_phys);
3184 set_fixmap_nocache(idx, ioapic_phys);
3185 apic_printk(APIC_VERBOSE,
3186 "mapped IOAPIC to %016lx (%016lx)\n",
3187 __fix_to_virt(idx), ioapic_phys);
3190 if (ioapic_res != NULL) {
3191 ioapic_res->start = ioapic_phys;
3192 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
3198 static int __init ioapic_insert_resources(void)
3201 struct resource *r = ioapic_resources;
3205 "IO APIC resources could not be allocated.\n");
3209 for (i = 0; i < nr_ioapics; i++) {
3210 insert_resource(&iomem_resource, r);
3217 /* Insert the IO APIC resources after PCI initialization has occurred to handle
3218 * IO APICs that are mapped in on a BAR in PCI space. */
3219 late_initcall(ioapic_insert_resources);