/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_irq.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
#define __apicdebuginit(type) static type __init
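/*
 * Helpers declared with __apicdebuginit() are static and tagged __init, so
 * they live in .init.text: the debug dumps they produce are only available
 * during boot and the code is discarded afterwards.
 */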
/*
 * Is the SiS APIC rmw bug present ?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;
static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);
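/*
 * ioapic_lock serializes accesses to the IO-APIC index/data register window
 * (and the RTE read/modify/write sequences built on it); vector_lock protects
 * the per-cpu vector_irq[] tables while vectors are assigned or torn down.
 */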
/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;
static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);
/*
 * This is performance-critical, we want to do it O(1)
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};
124 static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
126 struct irq_pin_list *pin;
129 node = cpu_to_node(cpu);
131 pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
132 printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node);
struct irq_cfg {
	struct irq_pin_list *irq_2_pin;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[] = {
#else
static struct irq_cfg irq_cfgx[NR_IRQS] = {
#endif
	[0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};
170 void __init arch_early_irq_init(void)
173 struct irq_desc *desc;
178 count = ARRAY_SIZE(irq_cfgx);
180 for (i = 0; i < count; i++) {
181 desc = irq_to_desc(i);
182 desc->chip_data = &cfg[i];
186 #ifdef CONFIG_SPARSE_IRQ
187 static struct irq_cfg *irq_cfg(unsigned int irq)
189 struct irq_cfg *cfg = NULL;
190 struct irq_desc *desc;
192 desc = irq_to_desc(irq);
194 cfg = desc->chip_data;
199 static struct irq_cfg *get_one_free_irq_cfg(int cpu)
204 node = cpu_to_node(cpu);
206 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
207 printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
212 void arch_init_chip_data(struct irq_desc *desc, int cpu)
216 cfg = desc->chip_data;
218 desc->chip_data = get_one_free_irq_cfg(cpu);
219 if (!desc->chip_data) {
220 printk(KERN_ERR "can not alloc irq_cfg\n");
227 static struct irq_cfg *irq_cfg(unsigned int irq)
229 return irq < nr_irqs ? irq_cfgx + irq : NULL;
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};
240 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
242 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
243 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
246 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
248 struct io_apic __iomem *io_apic = io_apic_base(apic);
249 writel(reg, &io_apic->index);
250 return readl(&io_apic->data);
253 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
255 struct io_apic __iomem *io_apic = io_apic_base(apic);
256 writel(reg, &io_apic->index);
257 writel(value, &io_apic->data);
261 * Re-write a value: to be used for read-modify-write
262 * cycles where the read already set up the index register.
264 * Older SiS APIC requires we rewrite the index register
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
275 static bool io_apic_level_ack_pending(unsigned int irq)
277 struct irq_pin_list *entry;
279 struct irq_cfg *cfg = irq_cfg(irq);
281 spin_lock_irqsave(&ioapic_lock, flags);
282 entry = cfg->irq_2_pin;
290 reg = io_apic_read(entry->apic, 0x10 + pin*2);
291 /* Is the remote IRR bit set? */
292 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
293 spin_unlock_irqrestore(&ioapic_lock, flags);
300 spin_unlock_irqrestore(&ioapic_lock, flags);
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};
310 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
312 union entry_union eu;
314 spin_lock_irqsave(&ioapic_lock, flags);
315 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
316 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
317 spin_unlock_irqrestore(&ioapic_lock, flags);
322 * When we write a new IO APIC routing entry, we need to write the high
323 * word first! If the mask bit in the low word is clear, we will enable
324 * the interrupt, and we need to make sure the entry is fully populated
325 * before that happens.
328 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
330 union entry_union eu;
332 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
333 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
336 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
339 spin_lock_irqsave(&ioapic_lock, flags);
340 __ioapic_write_entry(apic, pin, e);
341 spin_unlock_irqrestore(&ioapic_lock, flags);
345 * When we mask an IO APIC routing entry, we need to write the low
346 * word first, in order to set the mask bit before we change the
349 static void ioapic_mask_entry(int apic, int pin)
352 union entry_union eu = { .entry.mask = 1 };
354 spin_lock_irqsave(&ioapic_lock, flags);
355 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
356 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
357 spin_unlock_irqrestore(&ioapic_lock, flags);
361 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
365 struct irq_pin_list *entry;
368 entry = cfg->irq_2_pin;
377 #ifdef CONFIG_INTR_REMAP
379 * With interrupt-remapping, destination information comes
380 * from interrupt-remapping table entry.
382 if (!irq_remapped(irq))
383 io_apic_write(apic, 0x11 + pin*2, dest);
385 io_apic_write(apic, 0x11 + pin*2, dest);
387 reg = io_apic_read(apic, 0x10 + pin*2);
388 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
390 io_apic_modify(apic, 0x10 + pin*2, reg);
397 static int assign_irq_vector(int irq, cpumask_t mask);
399 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
405 struct irq_desc *desc;
407 cpus_and(tmp, mask, cpu_online_map);
412 if (assign_irq_vector(irq, mask))
415 cpus_and(tmp, cfg->domain, mask);
416 dest = cpu_mask_to_apicid(tmp);
418 * Only the high 8 bits are valid.
420 dest = SET_APIC_LOGICAL_ID(dest);
422 desc = irq_to_desc(irq);
423 spin_lock_irqsave(&ioapic_lock, flags);
424 __target_IO_APIC_irq(irq, dest, cfg->vector);
425 desc->affinity = mask;
426 spin_unlock_irqrestore(&ioapic_lock, flags);
428 #endif /* CONFIG_SMP */
431 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
432 * shared ISA-space IRQs, so we have to support them. We are super
433 * fast in the common case, and fast for shared ISA-space IRQs.
435 static void add_pin_to_irq_cpu(unsigned int irq, int cpu, int apic, int pin)
437 struct irq_pin_list *entry;
438 struct irq_cfg *cfg = irq_cfg(irq);
440 entry = cfg->irq_2_pin;
442 entry = get_one_free_irq_2_pin(cpu);
444 printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
448 cfg->irq_2_pin = entry;
454 while (entry->next) {
455 /* not again, please */
456 if (entry->apic == apic && entry->pin == pin)
462 entry->next = get_one_free_irq_2_pin(cpu);
469 * Reroute an IRQ to a different pin.
471 static void __init replace_pin_at_irq(unsigned int irq, int cpu,
472 int oldapic, int oldpin,
473 int newapic, int newpin)
475 struct irq_cfg *cfg = irq_cfg(irq);
476 struct irq_pin_list *entry = cfg->irq_2_pin;
480 if (entry->apic == oldapic && entry->pin == oldpin) {
481 entry->apic = newapic;
484 /* every one is different, right? */
490 /* why? call replace before add? */
492 add_pin_to_irq_cpu(irq, cpu, newapic, newpin);
495 static inline void io_apic_modify_irq(unsigned int irq,
496 int mask_and, int mask_or,
497 void (*final)(struct irq_pin_list *entry))
501 struct irq_pin_list *entry;
504 for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
507 reg = io_apic_read(entry->apic, 0x10 + pin * 2);
510 io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
516 static void __unmask_IO_APIC_irq(unsigned int irq)
518 io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
522 void io_apic_sync(struct irq_pin_list *entry)
525 * Synchronize the IO-APIC and the CPU by doing
526 * a dummy read from the IO-APIC
528 struct io_apic __iomem *io_apic;
529 io_apic = io_apic_base(entry->apic);
530 readl(&io_apic->data);
533 static void __mask_IO_APIC_irq(unsigned int irq)
535 io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
537 #else /* CONFIG_X86_32 */
538 static void __mask_IO_APIC_irq(unsigned int irq)
540 io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
543 static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
545 io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
546 IO_APIC_REDIR_MASKED, NULL);
549 static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
551 io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
552 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
554 #endif /* CONFIG_X86_32 */
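/*
 * Note: the __mask_and_edge / __unmask_and_level pair above exists only on
 * 32-bit; ack_apic_level() uses it to simulate a missed EOI for the 82093AA
 * level/edge erratum described further down in this file.
 */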
556 static void mask_IO_APIC_irq (unsigned int irq)
560 spin_lock_irqsave(&ioapic_lock, flags);
561 __mask_IO_APIC_irq(irq);
562 spin_unlock_irqrestore(&ioapic_lock, flags);
565 static void unmask_IO_APIC_irq (unsigned int irq)
569 spin_lock_irqsave(&ioapic_lock, flags);
570 __unmask_IO_APIC_irq(irq);
571 spin_unlock_irqrestore(&ioapic_lock, flags);
574 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
576 struct IO_APIC_route_entry entry;
578 /* Check delivery_mode to be sure we're not clearing an SMI pin */
579 entry = ioapic_read_entry(apic, pin);
580 if (entry.delivery_mode == dest_SMI)
583 * Disable it in the IO-APIC irq-routing table:
585 ioapic_mask_entry(apic, pin);
588 static void clear_IO_APIC (void)
592 for (apic = 0; apic < nr_ioapics; apic++)
593 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
594 clear_IO_APIC_pin(apic, pin);
597 #if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
598 void send_IPI_self(int vector)
605 apic_wait_icr_idle();
606 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
608 * Send the IPI. The write to APIC_ICR fires this off.
610 apic_write(APIC_ICR, cfg);
612 #endif /* !CONFIG_SMP && CONFIG_X86_32*/
616 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
617 * specific CPU-side IRQs.
621 static int pirq_entries [MAX_PIRQS];
622 static int pirqs_enabled;
624 static int __init ioapic_pirq_setup(char *str)
627 int ints[MAX_PIRQS+1];
629 get_options(str, ARRAY_SIZE(ints), ints);
631 for (i = 0; i < MAX_PIRQS; i++)
632 pirq_entries[i] = -1;
635 apic_printk(APIC_VERBOSE, KERN_INFO
636 "PIRQ redirection, working around broken MP-BIOS.\n");
638 if (ints[0] < MAX_PIRQS)
641 for (i = 0; i < max; i++) {
642 apic_printk(APIC_VERBOSE, KERN_DEBUG
643 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
645 * PIRQs are mapped upside down, usually.
647 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
652 __setup("pirq=", ioapic_pirq_setup);
653 #endif /* CONFIG_X86_32 */
655 #ifdef CONFIG_INTR_REMAP
656 /* I/O APIC RTE contents at the OS boot up */
657 static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
660 * Saves and masks all the unmasked IO-APIC RTE's
662 int save_mask_IO_APIC_setup(void)
664 union IO_APIC_reg_01 reg_01;
669 * The number of IO-APIC IRQ registers (== #pins):
671 for (apic = 0; apic < nr_ioapics; apic++) {
672 spin_lock_irqsave(&ioapic_lock, flags);
673 reg_01.raw = io_apic_read(apic, 1);
674 spin_unlock_irqrestore(&ioapic_lock, flags);
675 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
678 for (apic = 0; apic < nr_ioapics; apic++) {
679 early_ioapic_entries[apic] =
680 kzalloc(sizeof(struct IO_APIC_route_entry) *
681 nr_ioapic_registers[apic], GFP_KERNEL);
682 if (!early_ioapic_entries[apic])
686 for (apic = 0; apic < nr_ioapics; apic++)
687 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
688 struct IO_APIC_route_entry entry;
690 entry = early_ioapic_entries[apic][pin] =
691 ioapic_read_entry(apic, pin);
694 ioapic_write_entry(apic, pin, entry);
702 kfree(early_ioapic_entries[apic--]);
703 memset(early_ioapic_entries, 0,
704 ARRAY_SIZE(early_ioapic_entries));
709 void restore_IO_APIC_setup(void)
713 for (apic = 0; apic < nr_ioapics; apic++) {
714 if (!early_ioapic_entries[apic])
716 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
717 ioapic_write_entry(apic, pin,
718 early_ioapic_entries[apic][pin]);
719 kfree(early_ioapic_entries[apic]);
720 early_ioapic_entries[apic] = NULL;
724 void reinit_intr_remapped_IO_APIC(int intr_remapping)
727 * for now plain restore of previous settings.
728 * TBD: In the case of OS enabling interrupt-remapping,
729 * IO-APIC RTE's need to be setup to point to interrupt-remapping
730 * table entries. for now, do a plain restore, and wait for
731 * the setup_IO_APIC_irqs() to do proper initialization.
733 restore_IO_APIC_setup();
738 * Find the IRQ entry number of a certain pin.
740 static int find_irq_entry(int apic, int pin, int type)
744 for (i = 0; i < mp_irq_entries; i++)
745 if (mp_irqs[i].mp_irqtype == type &&
746 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
747 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
748 mp_irqs[i].mp_dstirq == pin)
755 * Find the pin to which IRQ[irq] (ISA) is connected
757 static int __init find_isa_irq_pin(int irq, int type)
761 for (i = 0; i < mp_irq_entries; i++) {
762 int lbus = mp_irqs[i].mp_srcbus;
764 if (test_bit(lbus, mp_bus_not_pci) &&
765 (mp_irqs[i].mp_irqtype == type) &&
766 (mp_irqs[i].mp_srcbusirq == irq))
768 return mp_irqs[i].mp_dstirq;
773 static int __init find_isa_irq_apic(int irq, int type)
777 for (i = 0; i < mp_irq_entries; i++) {
778 int lbus = mp_irqs[i].mp_srcbus;
780 if (test_bit(lbus, mp_bus_not_pci) &&
781 (mp_irqs[i].mp_irqtype == type) &&
782 (mp_irqs[i].mp_srcbusirq == irq))
785 if (i < mp_irq_entries) {
787 for(apic = 0; apic < nr_ioapics; apic++) {
788 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
797 * Find a specific PCI IRQ entry.
798 * Not an __init, possibly needed by modules
800 static int pin_2_irq(int idx, int apic, int pin);
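/*
 * For PCI buses the MP-table srcbusirq field encodes the device (slot)
 * number in bits 6..2 and the interrupt pin (INTA#..INTD#) in bits 1..0,
 * which is what the (>> 2) & 0x1f and & 3 tests below rely on.
 */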
802 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
804 int apic, i, best_guess = -1;
806 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
808 if (test_bit(bus, mp_bus_not_pci)) {
809 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
812 for (i = 0; i < mp_irq_entries; i++) {
813 int lbus = mp_irqs[i].mp_srcbus;
815 for (apic = 0; apic < nr_ioapics; apic++)
816 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
817 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
820 if (!test_bit(lbus, mp_bus_not_pci) &&
821 !mp_irqs[i].mp_irqtype &&
823 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
824 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
826 if (!(apic || IO_APIC_IRQ(irq)))
829 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
832 * Use the first all-but-pin matching entry as a
833 * best-guess fuzzy result for broken mptables.
842 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
844 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
846 * EISA Edge/Level control register, ELCR
848 static int EISA_ELCR(unsigned int irq)
850 if (irq < NR_IRQS_LEGACY) {
851 unsigned int port = 0x4d0 + (irq >> 3);
852 return (inb(port) >> (irq & 7)) & 1;
854 apic_printk(APIC_VERBOSE, KERN_INFO
855 "Broken MPtable reports ISA irq %d\n", irq);
861 /* ISA interrupts are always polarity zero edge triggered,
862 * when listed as conforming in the MP table. */
864 #define default_ISA_trigger(idx) (0)
865 #define default_ISA_polarity(idx) (0)
867 /* EISA interrupts are always polarity zero and can be edge or level
868 * trigger depending on the ELCR value. If an interrupt is listed as
869 * EISA conforming in the MP table, that means its trigger type must
870 * be read in from the ELCR */
872 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
873 #define default_EISA_polarity(idx) default_ISA_polarity(idx)
875 /* PCI interrupts are always polarity one level triggered,
876 * when listed as conforming in the MP table. */
878 #define default_PCI_trigger(idx) (1)
879 #define default_PCI_polarity(idx) (1)
881 /* MCA interrupts are always polarity zero level triggered,
882 * when listed as conforming in the MP table. */
884 #define default_MCA_trigger(idx) (1)
885 #define default_MCA_polarity(idx) default_ISA_polarity(idx)
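/*
 * In the decoded form used below, polarity 0 means active high and 1 means
 * active low, while trigger 0 means edge and 1 means level - matching the
 * per-bus defaults above (ISA: edge/high, PCI: level/low).
 */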
887 static int MPBIOS_polarity(int idx)
889 int bus = mp_irqs[idx].mp_srcbus;
893 * Determine IRQ line polarity (high active or low active):
895 switch (mp_irqs[idx].mp_irqflag & 3)
897 case 0: /* conforms, ie. bus-type dependent polarity */
898 if (test_bit(bus, mp_bus_not_pci))
899 polarity = default_ISA_polarity(idx);
901 polarity = default_PCI_polarity(idx);
903 case 1: /* high active */
908 case 2: /* reserved */
910 printk(KERN_WARNING "broken BIOS!!\n");
914 case 3: /* low active */
919 default: /* invalid */
921 printk(KERN_WARNING "broken BIOS!!\n");
929 static int MPBIOS_trigger(int idx)
931 int bus = mp_irqs[idx].mp_srcbus;
935 * Determine IRQ trigger mode (edge or level sensitive):
937 switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
939 case 0: /* conforms, ie. bus-type dependent */
940 if (test_bit(bus, mp_bus_not_pci))
941 trigger = default_ISA_trigger(idx);
943 trigger = default_PCI_trigger(idx);
944 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
945 switch (mp_bus_id_to_type[bus]) {
946 case MP_BUS_ISA: /* ISA pin */
948 /* set before the switch */
951 case MP_BUS_EISA: /* EISA pin */
953 trigger = default_EISA_trigger(idx);
956 case MP_BUS_PCI: /* PCI pin */
958 /* set before the switch */
961 case MP_BUS_MCA: /* MCA pin */
963 trigger = default_MCA_trigger(idx);
968 printk(KERN_WARNING "broken BIOS!!\n");
980 case 2: /* reserved */
982 printk(KERN_WARNING "broken BIOS!!\n");
991 default: /* invalid */
993 printk(KERN_WARNING "broken BIOS!!\n");
1001 static inline int irq_polarity(int idx)
1003 return MPBIOS_polarity(idx);
1006 static inline int irq_trigger(int idx)
1008 return MPBIOS_trigger(idx);
1011 int (*ioapic_renumber_irq)(int ioapic, int irq);
1012 static int pin_2_irq(int idx, int apic, int pin)
1015 int bus = mp_irqs[idx].mp_srcbus;
1018 * Debugging check, we are in big trouble if this message pops up!
1020 if (mp_irqs[idx].mp_dstirq != pin)
1021 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1023 if (test_bit(bus, mp_bus_not_pci)) {
1024 irq = mp_irqs[idx].mp_srcbusirq;
1027 * PCI IRQs are mapped in order
1031 irq += nr_ioapic_registers[i++];
1034 * For MPS mode, so far only needed by ES7000 platform
1036 if (ioapic_renumber_irq)
1037 irq = ioapic_renumber_irq(apic, irq);
1040 #ifdef CONFIG_X86_32
1042 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1044 if ((pin >= 16) && (pin <= 23)) {
1045 if (pirq_entries[pin-16] != -1) {
1046 if (!pirq_entries[pin-16]) {
1047 apic_printk(APIC_VERBOSE, KERN_DEBUG
1048 "disabling PIRQ%d\n", pin-16);
1050 irq = pirq_entries[pin-16];
1051 apic_printk(APIC_VERBOSE, KERN_DEBUG
1052 "using PIRQ%d -> IRQ %d\n",
1062 void lock_vector_lock(void)
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
1067 spin_lock(&vector_lock);
1070 void unlock_vector_lock(void)
1072 spin_unlock(&vector_lock);
1075 static int __assign_irq_vector(int irq, cpumask_t mask)
1078 * NOTE! The local APIC isn't very good at handling
1079 * multiple interrupts at the same interrupt level.
1080 * As the interrupt level is determined by taking the
1081 * vector number and shifting that right by 4, we
1082 * want to spread these out a bit so that they don't
1083 * all fall in the same interrupt level.
1085 * Also, we've got to be careful not to trash gate
1086 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1088 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1089 unsigned int old_vector;
1091 struct irq_cfg *cfg;
1095 /* Only try and allocate irqs on cpus that are present */
1096 cpus_and(mask, mask, cpu_online_map);
1098 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1101 old_vector = cfg->vector;
1104 cpus_and(tmp, cfg->domain, mask);
1105 if (!cpus_empty(tmp))
1109 for_each_cpu_mask_nr(cpu, mask) {
1110 cpumask_t domain, new_mask;
1114 domain = vector_allocation_domain(cpu);
1115 cpus_and(new_mask, domain, cpu_online_map);
1117 vector = current_vector;
1118 offset = current_offset;
1121 if (vector >= first_system_vector) {
1122 /* If we run out of vectors on large boxen, must share them. */
1123 offset = (offset + 1) % 8;
1124 vector = FIRST_DEVICE_VECTOR + offset;
1126 if (unlikely(current_vector == vector))
1128 #ifdef CONFIG_X86_64
1129 if (vector == IA32_SYSCALL_VECTOR)
1132 if (vector == SYSCALL_VECTOR)
1135 for_each_cpu_mask_nr(new_cpu, new_mask)
1136 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1139 current_vector = vector;
1140 current_offset = offset;
1142 cfg->move_in_progress = 1;
1143 cfg->old_domain = cfg->domain;
1145 for_each_cpu_mask_nr(new_cpu, new_mask)
1146 per_cpu(vector_irq, new_cpu)[vector] = irq;
1147 cfg->vector = vector;
1148 cfg->domain = domain;
1154 static int assign_irq_vector(int irq, cpumask_t mask)
1157 unsigned long flags;
1159 spin_lock_irqsave(&vector_lock, flags);
1160 err = __assign_irq_vector(irq, mask);
1161 spin_unlock_irqrestore(&vector_lock, flags);
1165 static void __clear_irq_vector(int irq)
1167 struct irq_cfg *cfg;
1172 BUG_ON(!cfg->vector);
1174 vector = cfg->vector;
1175 cpus_and(mask, cfg->domain, cpu_online_map);
1176 for_each_cpu_mask_nr(cpu, mask)
1177 per_cpu(vector_irq, cpu)[vector] = -1;
1180 cpus_clear(cfg->domain);
1182 if (likely(!cfg->move_in_progress))
1184 cpus_and(mask, cfg->old_domain, cpu_online_map);
1185 for_each_cpu_mask_nr(cpu, mask) {
1186 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1188 if (per_cpu(vector_irq, cpu)[vector] != irq)
1190 per_cpu(vector_irq, cpu)[vector] = -1;
1194 cfg->move_in_progress = 0;
1197 void __setup_vector_irq(int cpu)
1199 /* Initialize vector_irq on a new cpu */
1200 /* This function must be called with vector_lock held */
1202 struct irq_cfg *cfg;
1203 struct irq_desc *desc;
1205 /* Mark the inuse vectors */
1206 for_each_irq_desc(irq, desc) {
1209 cfg = desc->chip_data;
1210 if (!cpu_isset(cpu, cfg->domain))
1212 vector = cfg->vector;
1213 per_cpu(vector_irq, cpu)[vector] = irq;
1215 /* Mark the free vectors */
1216 for (vector = 0; vector < NR_VECTORS; ++vector) {
1217 irq = per_cpu(vector_irq, cpu)[vector];
1222 if (!cpu_isset(cpu, cfg->domain))
1223 per_cpu(vector_irq, cpu)[vector] = -1;
1227 static struct irq_chip ioapic_chip;
1228 #ifdef CONFIG_INTR_REMAP
1229 static struct irq_chip ir_ioapic_chip;
1232 #define IOAPIC_AUTO -1
1233 #define IOAPIC_EDGE 0
1234 #define IOAPIC_LEVEL 1
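/*
 * IOAPIC_AUTO asks ioapic_register_intr() to look the trigger mode up via
 * IO_APIC_irq_trigger() instead of being told edge or level explicitly.
 */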
1236 #ifdef CONFIG_X86_32
1237 static inline int IO_APIC_irq_trigger(int irq)
1241 for (apic = 0; apic < nr_ioapics; apic++) {
1242 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1243 idx = find_irq_entry(apic, pin, mp_INT);
1244 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1245 return irq_trigger(idx);
1249 * nonexistent IRQs are edge default
1254 static inline int IO_APIC_irq_trigger(int irq)
1260 static void ioapic_register_intr(int irq, unsigned long trigger)
1262 struct irq_desc *desc;
1264 desc = irq_to_desc(irq);
1266 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1267 trigger == IOAPIC_LEVEL)
1268 desc->status |= IRQ_LEVEL;
1270 desc->status &= ~IRQ_LEVEL;
1272 #ifdef CONFIG_INTR_REMAP
1273 if (irq_remapped(irq)) {
1274 desc->status |= IRQ_MOVE_PCNTXT;
1276 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1280 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1281 handle_edge_irq, "edge");
1285 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1286 trigger == IOAPIC_LEVEL)
1287 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1291 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1292 handle_edge_irq, "edge");
1295 static int setup_ioapic_entry(int apic, int irq,
1296 struct IO_APIC_route_entry *entry,
1297 unsigned int destination, int trigger,
1298 int polarity, int vector)
1301 * add it to the IO-APIC irq-routing table:
1303 memset(entry,0,sizeof(*entry));
1305 #ifdef CONFIG_INTR_REMAP
1306 if (intr_remapping_enabled) {
1307 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1309 struct IR_IO_APIC_route_entry *ir_entry =
1310 (struct IR_IO_APIC_route_entry *) entry;
1314 panic("No mapping iommu for ioapic %d\n", apic);
1316 index = alloc_irte(iommu, irq, 1);
1318 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1320 memset(&irte, 0, sizeof(irte));
1323 irte.dst_mode = INT_DEST_MODE;
1324 irte.trigger_mode = trigger;
1325 irte.dlvry_mode = INT_DELIVERY_MODE;
1326 irte.vector = vector;
1327 irte.dest_id = IRTE_DEST(destination);
1329 modify_irte(irq, &irte);
1331 ir_entry->index2 = (index >> 15) & 0x1;
1333 ir_entry->format = 1;
1334 ir_entry->index = (index & 0x7fff);
1338 entry->delivery_mode = INT_DELIVERY_MODE;
1339 entry->dest_mode = INT_DEST_MODE;
1340 entry->dest = destination;
1343 entry->mask = 0; /* enable IRQ */
1344 entry->trigger = trigger;
1345 entry->polarity = polarity;
1346 entry->vector = vector;
1348 /* Mask level triggered irqs.
1349 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1356 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
1357 int trigger, int polarity)
1359 struct irq_cfg *cfg;
1360 struct IO_APIC_route_entry entry;
1363 if (!IO_APIC_IRQ(irq))
1369 if (assign_irq_vector(irq, mask))
1372 cpus_and(mask, cfg->domain, mask);
1374 apic_printk(APIC_VERBOSE,KERN_DEBUG
1375 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1376 "IRQ %d Mode:%i Active:%i)\n",
1377 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1378 irq, trigger, polarity);
1381 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1382 cpu_mask_to_apicid(mask), trigger, polarity,
1384 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1385 mp_ioapics[apic].mp_apicid, pin);
1386 __clear_irq_vector(irq);
1390 ioapic_register_intr(irq, trigger);
1391 if (irq < NR_IRQS_LEGACY)
1392 disable_8259A_irq(irq);
1394 ioapic_write_entry(apic, pin, entry);
1397 static void __init setup_IO_APIC_irqs(void)
1399 int apic, pin, idx, irq;
1401 struct irq_desc *desc;
1402 int cpu = boot_cpu_id;
1404 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1406 for (apic = 0; apic < nr_ioapics; apic++) {
1407 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1409 idx = find_irq_entry(apic, pin, mp_INT);
1413 apic_printk(APIC_VERBOSE,
1414 KERN_DEBUG " %d-%d",
1415 mp_ioapics[apic].mp_apicid,
1418 apic_printk(APIC_VERBOSE, " %d-%d",
1419 mp_ioapics[apic].mp_apicid,
1424 apic_printk(APIC_VERBOSE,
1425 " (apicid-pin) not connected\n");
1429 irq = pin_2_irq(idx, apic, pin);
1430 #ifdef CONFIG_X86_32
1431 if (multi_timer_check(apic, irq))
1434 desc = irq_to_desc_alloc_cpu(irq, cpu);
1436 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1439 add_pin_to_irq_cpu(irq, cpu, apic, pin);
1441 setup_IO_APIC_irq(apic, pin, irq,
1442 irq_trigger(idx), irq_polarity(idx));
1447 apic_printk(APIC_VERBOSE,
1448 " (apicid-pin) not connected\n");
1452 * Set up the timer pin, possibly with the 8259A-master behind.
1454 static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1457 struct IO_APIC_route_entry entry;
1459 #ifdef CONFIG_INTR_REMAP
1460 if (intr_remapping_enabled)
1464 memset(&entry, 0, sizeof(entry));
1467 * We use logical delivery to get the timer IRQ
1470 entry.dest_mode = INT_DEST_MODE;
1471 entry.mask = 1; /* mask IRQ now */
1472 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1473 entry.delivery_mode = INT_DELIVERY_MODE;
1476 entry.vector = vector;
1479 * The timer IRQ doesn't have to know that behind the
1480 * scene we may have a 8259A-master in AEOI mode ...
1482 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1485 * Add it to the IO-APIC irq-routing table:
1487 ioapic_write_entry(apic, pin, entry);
1491 __apicdebuginit(void) print_IO_APIC(void)
1494 union IO_APIC_reg_00 reg_00;
1495 union IO_APIC_reg_01 reg_01;
1496 union IO_APIC_reg_02 reg_02;
1497 union IO_APIC_reg_03 reg_03;
1498 unsigned long flags;
1499 struct irq_cfg *cfg;
1500 struct irq_desc *desc;
1503 if (apic_verbosity == APIC_QUIET)
1506 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1507 for (i = 0; i < nr_ioapics; i++)
1508 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1509 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1512 * We are a bit conservative about what we expect. We have to
1513 * know about every hardware change ASAP.
1515 printk(KERN_INFO "testing the IO APIC.......................\n");
1517 for (apic = 0; apic < nr_ioapics; apic++) {
1519 spin_lock_irqsave(&ioapic_lock, flags);
1520 reg_00.raw = io_apic_read(apic, 0);
1521 reg_01.raw = io_apic_read(apic, 1);
1522 if (reg_01.bits.version >= 0x10)
1523 reg_02.raw = io_apic_read(apic, 2);
1524 if (reg_01.bits.version >= 0x20)
1525 reg_03.raw = io_apic_read(apic, 3);
1526 spin_unlock_irqrestore(&ioapic_lock, flags);
1529 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1530 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1531 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1532 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1533 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1535 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01);
1536 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1538 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1539 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1542 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1543 * but the value of reg_02 is read as the previous read register
1544 * value, so ignore it if reg_02 == reg_01.
1546 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1547 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1548 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1552 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1553 * or reg_03, but the value of reg_0[23] is read as the previous read
1554 * register value, so ignore it if reg_03 == reg_0[12].
1556 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1557 reg_03.raw != reg_01.raw) {
1558 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1559 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1562 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1564 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1565 " Stat Dmod Deli Vect: \n");
1567 for (i = 0; i <= reg_01.bits.entries; i++) {
1568 struct IO_APIC_route_entry entry;
1570 entry = ioapic_read_entry(apic, i);
1572 printk(KERN_DEBUG " %02x %03X ",
1577 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1582 entry.delivery_status,
1584 entry.delivery_mode,
1589 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1590 for_each_irq_desc(irq, desc) {
1591 struct irq_pin_list *entry;
1595 cfg = desc->chip_data;
1596 entry = cfg->irq_2_pin;
1599 printk(KERN_DEBUG "IRQ%d ", irq);
1601 printk("-> %d:%d", entry->apic, entry->pin);
1604 entry = entry->next;
1609 printk(KERN_INFO ".................................... done.\n");
1614 __apicdebuginit(void) print_APIC_bitfield(int base)
1619 if (apic_verbosity == APIC_QUIET)
1622 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1623 for (i = 0; i < 8; i++) {
1624 v = apic_read(base + i*0x10);
1625 for (j = 0; j < 32; j++) {
1635 __apicdebuginit(void) print_local_APIC(void *dummy)
1637 unsigned int v, ver, maxlvt;
1640 if (apic_verbosity == APIC_QUIET)
1643 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1644 smp_processor_id(), hard_smp_processor_id());
1645 v = apic_read(APIC_ID);
1646 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1647 v = apic_read(APIC_LVR);
1648 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1649 ver = GET_APIC_VERSION(v);
1650 maxlvt = lapic_get_maxlvt();
1652 v = apic_read(APIC_TASKPRI);
1653 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1655 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1656 if (!APIC_XAPIC(ver)) {
1657 v = apic_read(APIC_ARBPRI);
1658 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1659 v & APIC_ARBPRI_MASK);
1661 v = apic_read(APIC_PROCPRI);
1662 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1666 * Remote read supported only in the 82489DX and local APIC for
1667 * Pentium processors.
1669 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
1670 v = apic_read(APIC_RRR);
1671 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1674 v = apic_read(APIC_LDR);
1675 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1676 if (!x2apic_enabled()) {
1677 v = apic_read(APIC_DFR);
1678 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1680 v = apic_read(APIC_SPIV);
1681 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1683 printk(KERN_DEBUG "... APIC ISR field:\n");
1684 print_APIC_bitfield(APIC_ISR);
1685 printk(KERN_DEBUG "... APIC TMR field:\n");
1686 print_APIC_bitfield(APIC_TMR);
1687 printk(KERN_DEBUG "... APIC IRR field:\n");
1688 print_APIC_bitfield(APIC_IRR);
1690 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1691 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1692 apic_write(APIC_ESR, 0);
1694 v = apic_read(APIC_ESR);
1695 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1698 icr = apic_icr_read();
1699 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1700 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1702 v = apic_read(APIC_LVTT);
1703 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1705 if (maxlvt > 3) { /* PC is LVT#4. */
1706 v = apic_read(APIC_LVTPC);
1707 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1709 v = apic_read(APIC_LVT0);
1710 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1711 v = apic_read(APIC_LVT1);
1712 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1714 if (maxlvt > 2) { /* ERR is LVT#3. */
1715 v = apic_read(APIC_LVTERR);
1716 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1719 v = apic_read(APIC_TMICT);
1720 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1721 v = apic_read(APIC_TMCCT);
1722 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1723 v = apic_read(APIC_TDCR);
1724 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1728 __apicdebuginit(void) print_all_local_APICs(void)
1733 for_each_online_cpu(cpu)
1734 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1738 __apicdebuginit(void) print_PIC(void)
1741 unsigned long flags;
1743 if (apic_verbosity == APIC_QUIET)
1746 printk(KERN_DEBUG "\nprinting PIC contents\n");
1748 spin_lock_irqsave(&i8259A_lock, flags);
1750 v = inb(0xa1) << 8 | inb(0x21);
1751 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1753 v = inb(0xa0) << 8 | inb(0x20);
1754 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1758 v = inb(0xa0) << 8 | inb(0x20);
1762 spin_unlock_irqrestore(&i8259A_lock, flags);
1764 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1766 v = inb(0x4d1) << 8 | inb(0x4d0);
1767 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1770 __apicdebuginit(int) print_all_ICs(void)
1773 print_all_local_APICs();
1779 fs_initcall(print_all_ICs);
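/*
 * Registered as an fs_initcall so the interrupt-controller state is dumped
 * once during boot, provided apic_verbosity allows it (each helper above
 * bails out early when apic_verbosity == APIC_QUIET).
 */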
/* Where, if anywhere, is the i8259 connected in external int mode */
1783 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1785 void __init enable_IO_APIC(void)
1787 union IO_APIC_reg_01 reg_01;
1788 int i8259_apic, i8259_pin;
1790 unsigned long flags;
1792 #ifdef CONFIG_X86_32
1795 for (i = 0; i < MAX_PIRQS; i++)
1796 pirq_entries[i] = -1;
1800 * The number of IO-APIC IRQ registers (== #pins):
1802 for (apic = 0; apic < nr_ioapics; apic++) {
1803 spin_lock_irqsave(&ioapic_lock, flags);
1804 reg_01.raw = io_apic_read(apic, 1);
1805 spin_unlock_irqrestore(&ioapic_lock, flags);
1806 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1808 for(apic = 0; apic < nr_ioapics; apic++) {
1810 /* See if any of the pins is in ExtINT mode */
1811 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1812 struct IO_APIC_route_entry entry;
1813 entry = ioapic_read_entry(apic, pin);
1815 /* If the interrupt line is enabled and in ExtInt mode
1816 * I have found the pin where the i8259 is connected.
1818 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1819 ioapic_i8259.apic = apic;
1820 ioapic_i8259.pin = pin;
	/* Look to see if the MP table has reported the ExtINT pin. */
	/* If we could not find the appropriate pin by looking at the ioapic,
	 * the i8259 is probably not connected to the ioapic, but give the
	 * mptable a chance anyway.
	 */
1831 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1832 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1833 /* Trust the MP table if nothing is setup in the hardware */
1834 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1835 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1836 ioapic_i8259.pin = i8259_pin;
1837 ioapic_i8259.apic = i8259_apic;
1839 /* Complain if the MP table and the hardware disagree */
1840 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1841 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1843 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1847 * Do not trust the IO-APIC being empty at bootup
1853 * Not an __init, needed by the reboot code
1855 void disable_IO_APIC(void)
1858 * Clear the IO-APIC before rebooting:
1863 * If the i8259 is routed through an IOAPIC
1864 * Put that IOAPIC in virtual wire mode
1865 * so legacy interrupts can be delivered.
1867 if (ioapic_i8259.pin != -1) {
1868 struct IO_APIC_route_entry entry;
1870 memset(&entry, 0, sizeof(entry));
1871 entry.mask = 0; /* Enabled */
1872 entry.trigger = 0; /* Edge */
1874 entry.polarity = 0; /* High */
1875 entry.delivery_status = 0;
1876 entry.dest_mode = 0; /* Physical */
1877 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1879 entry.dest = read_apic_id();
1882 * Add it to the IO-APIC irq-routing table:
1884 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
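	/*
	 * disconnect_bsp_APIC() is told whether the ExtINT virtual wire was
	 * set up through an IO-APIC pin above, so it can restore the legacy
	 * interrupt path appropriately before the reboot.
	 */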
1887 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1890 #ifdef CONFIG_X86_32
1892 * function to set the IO-APIC physical IDs based on the
1893 * values stored in the MPC table.
1895 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1898 static void __init setup_ioapic_ids_from_mpc(void)
1900 union IO_APIC_reg_00 reg_00;
1901 physid_mask_t phys_id_present_map;
1904 unsigned char old_id;
1905 unsigned long flags;
1907 if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
1911 * Don't check I/O APIC IDs for xAPIC systems. They have
1912 * no meaning without the serial APIC bus.
1914 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1915 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1918 * This is broken; anything with a real cpu count has to
1919 * circumvent this idiocy regardless.
1921 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
1924 * Set the IOAPIC ID to the value stored in the MPC table.
1926 for (apic = 0; apic < nr_ioapics; apic++) {
1928 /* Read the register 0 value */
1929 spin_lock_irqsave(&ioapic_lock, flags);
1930 reg_00.raw = io_apic_read(apic, 0);
1931 spin_unlock_irqrestore(&ioapic_lock, flags);
1933 old_id = mp_ioapics[apic].mp_apicid;
1935 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
1936 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1937 apic, mp_ioapics[apic].mp_apicid);
1938 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1940 mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
1944 * Sanity check, is the ID really free? Every APIC in a
1945 * system must have a unique ID or we get lots of nice
1946 * 'stuck on smp_invalidate_needed IPI wait' messages.
1948 if (check_apicid_used(phys_id_present_map,
1949 mp_ioapics[apic].mp_apicid)) {
1950 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1951 apic, mp_ioapics[apic].mp_apicid);
1952 for (i = 0; i < get_physical_broadcast(); i++)
1953 if (!physid_isset(i, phys_id_present_map))
1955 if (i >= get_physical_broadcast())
1956 panic("Max APIC ID exceeded!\n");
1957 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1959 physid_set(i, phys_id_present_map);
1960 mp_ioapics[apic].mp_apicid = i;
1963 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
1964 apic_printk(APIC_VERBOSE, "Setting %d in the "
1965 "phys_id_present_map\n",
1966 mp_ioapics[apic].mp_apicid);
1967 physids_or(phys_id_present_map, phys_id_present_map, tmp);
1972 * We need to adjust the IRQ routing table
1973 * if the ID changed.
1975 if (old_id != mp_ioapics[apic].mp_apicid)
1976 for (i = 0; i < mp_irq_entries; i++)
1977 if (mp_irqs[i].mp_dstapic == old_id)
1978 mp_irqs[i].mp_dstapic
1979 = mp_ioapics[apic].mp_apicid;
1982 * Read the right value from the MPC table and
1983 * write it into the ID register.
1985 apic_printk(APIC_VERBOSE, KERN_INFO
1986 "...changing IO-APIC physical APIC ID to %d ...",
1987 mp_ioapics[apic].mp_apicid);
1989 reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
1990 spin_lock_irqsave(&ioapic_lock, flags);
1991 io_apic_write(apic, 0, reg_00.raw);
1992 spin_unlock_irqrestore(&ioapic_lock, flags);
1997 spin_lock_irqsave(&ioapic_lock, flags);
1998 reg_00.raw = io_apic_read(apic, 0);
1999 spin_unlock_irqrestore(&ioapic_lock, flags);
2000 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
2001 printk("could not set ID!\n");
2003 apic_printk(APIC_VERBOSE, " ok.\n");
2008 int no_timer_check __initdata;
2010 static int __init notimercheck(char *s)
2015 __setup("no_timer_check", notimercheck);
2018 * There is a nasty bug in some older SMP boards, their mptable lies
2019 * about the timer IRQ. We do the following to work around the situation:
2021 * - timer IRQ defaults to IO-APIC IRQ
2022 * - if this function detects that timer IRQs are defunct, then we fall
2023 * back to ISA timer IRQs
2025 static int __init timer_irq_works(void)
2027 unsigned long t1 = jiffies;
2028 unsigned long flags;
2033 local_save_flags(flags);
2035 /* Let ten ticks pass... */
2036 mdelay((10 * 1000) / HZ);
2037 local_irq_restore(flags);
2040 * Expect a few ticks at least, to be sure some possible
2041 * glue logic does not lock up after one or two first
2042 * ticks in a non-ExtINT mode. Also the local APIC
2043 * might have cached one ExtINT interrupt. Finally, at
2044 * least one tick may be lost due to delays.
2048 if (time_after(jiffies, t1 + 4))
2054 * In the SMP+IOAPIC case it might happen that there are an unspecified
2055 * number of pending IRQ events unhandled. These cases are very rare,
2056 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2057 * better to do it this way as thus we do not have to be aware of
2058 * 'pending' interrupts in the IRQ path, except at this point.
2061 * Edge triggered needs to resend any interrupt
2062 * that was delayed but this is now handled in the device
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
2076 static unsigned int startup_ioapic_irq(unsigned int irq)
2078 int was_pending = 0;
2079 unsigned long flags;
2080 struct irq_cfg *cfg;
2082 spin_lock_irqsave(&ioapic_lock, flags);
2083 if (irq < NR_IRQS_LEGACY) {
2084 disable_8259A_irq(irq);
2085 if (i8259A_irq_pending(irq))
2089 __unmask_IO_APIC_irq(irq);
2090 spin_unlock_irqrestore(&ioapic_lock, flags);
2095 #ifdef CONFIG_X86_64
2096 static int ioapic_retrigger_irq(unsigned int irq)
2099 struct irq_cfg *cfg = irq_cfg(irq);
2100 unsigned long flags;
2102 spin_lock_irqsave(&vector_lock, flags);
2103 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
2104 spin_unlock_irqrestore(&vector_lock, flags);
2109 static int ioapic_retrigger_irq(unsigned int irq)
2111 send_IPI_self(irq_cfg(irq)->vector);
2118 * Level and edge triggered IO-APIC interrupts need different handling,
2119 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2120 * handled with the level-triggered descriptor, but that one has slightly
2121 * more overhead. Level-triggered interrupts cannot be handled with the
2122 * edge-triggered handler, without risking IRQ storms and other ugly
2128 #ifdef CONFIG_INTR_REMAP
2129 static void ir_irq_migration(struct work_struct *work);
2131 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2134 * Migrate the IO-APIC irq in the presence of intr-remapping.
2136 * For edge triggered, irq migration is a simple atomic update(of vector
2137 * and cpu destination) of IRTE and flush the hardware cache.
 * For level triggered, we need to modify the io-apic RTE as well with the
 * updated vector information, along with modifying the IRTE with the vector
 * and destination. So irq migration for level triggered is a little more
 * complex than edge triggered migration. The good news is that we use the
 * same algorithm for level triggered migration as we do today, the only
 * difference being that we now initiate the irq migration from process
 * context instead of interrupt context.
2147 * In future, when we do a directed EOI (combined with cpu EOI broadcast
2148 * suppression) to the IO-APIC, level triggered irq migration will also be
2149 * as simple as edge triggered migration and we can do the irq migration
2150 * with a simple atomic update to IO-APIC RTE.
2152 static void migrate_ioapic_irq(int irq, cpumask_t mask)
2154 struct irq_cfg *cfg;
2155 struct irq_desc *desc;
2156 cpumask_t tmp, cleanup_mask;
2158 int modify_ioapic_rte;
2160 unsigned long flags;
2162 cpus_and(tmp, mask, cpu_online_map);
2163 if (cpus_empty(tmp))
2166 if (get_irte(irq, &irte))
2169 if (assign_irq_vector(irq, mask))
2173 cpus_and(tmp, cfg->domain, mask);
2174 dest = cpu_mask_to_apicid(tmp);
2176 desc = irq_to_desc(irq);
2177 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2178 if (modify_ioapic_rte) {
2179 spin_lock_irqsave(&ioapic_lock, flags);
2180 __target_IO_APIC_irq(irq, dest, cfg->vector);
2181 spin_unlock_irqrestore(&ioapic_lock, flags);
2184 irte.vector = cfg->vector;
2185 irte.dest_id = IRTE_DEST(dest);
	 * Modify the IRTE and flush the Interrupt entry cache.
2190 modify_irte(irq, &irte);
2192 if (cfg->move_in_progress) {
2193 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2194 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2195 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2196 cfg->move_in_progress = 0;
2199 desc->affinity = mask;
2202 static int migrate_irq_remapped_level(int irq)
2205 struct irq_desc *desc = irq_to_desc(irq);
2207 mask_IO_APIC_irq(irq);
2209 if (io_apic_level_ack_pending(irq)) {
2211 * Interrupt in progress. Migrating irq now will change the
2212 * vector information in the IO-APIC RTE and that will confuse
2213 * the EOI broadcast performed by cpu.
2214 * So, delay the irq migration to the next instance.
2216 schedule_delayed_work(&ir_migration_work, 1);
	/* everything is clear. we have right of way */
2221 migrate_ioapic_irq(irq, desc->pending_mask);
2224 desc->status &= ~IRQ_MOVE_PENDING;
2225 cpus_clear(desc->pending_mask);
2228 unmask_IO_APIC_irq(irq);
2232 static void ir_irq_migration(struct work_struct *work)
2235 struct irq_desc *desc;
2237 for_each_irq_desc(irq, desc) {
2241 if (desc->status & IRQ_MOVE_PENDING) {
2242 unsigned long flags;
2244 spin_lock_irqsave(&desc->lock, flags);
2245 if (!desc->chip->set_affinity ||
2246 !(desc->status & IRQ_MOVE_PENDING)) {
2247 desc->status &= ~IRQ_MOVE_PENDING;
2248 spin_unlock_irqrestore(&desc->lock, flags);
2252 desc->chip->set_affinity(irq, desc->pending_mask);
2253 spin_unlock_irqrestore(&desc->lock, flags);
2259 * Migrates the IRQ destination in the process context.
2261 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
2263 struct irq_desc *desc = irq_to_desc(irq);
2265 if (desc->status & IRQ_LEVEL) {
2266 desc->status |= IRQ_MOVE_PENDING;
2267 desc->pending_mask = mask;
2268 migrate_irq_remapped_level(irq);
2272 migrate_ioapic_irq(irq, mask);
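/*
 * smp_irq_move_cleanup_interrupt() runs on the cpus of an irq's old domain
 * (triggered by IRQ_MOVE_CLEANUP_VECTOR): each cpu releases its stale
 * vector_irq[] slot and drops move_cleanup_count until the migration is done.
 */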
2276 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2278 unsigned vector, me;
2280 #ifdef CONFIG_X86_64
2285 me = smp_processor_id();
2286 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2288 struct irq_desc *desc;
2289 struct irq_cfg *cfg;
2290 irq = __get_cpu_var(vector_irq)[vector];
2295 desc = irq_to_desc(irq);
2300 spin_lock(&desc->lock);
2301 if (!cfg->move_cleanup_count)
2304 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
2307 __get_cpu_var(vector_irq)[vector] = -1;
2308 cfg->move_cleanup_count--;
2310 spin_unlock(&desc->lock);
2316 static void irq_complete_move(unsigned int irq)
2318 struct irq_cfg *cfg = irq_cfg(irq);
2319 unsigned vector, me;
2321 if (likely(!cfg->move_in_progress))
2324 vector = ~get_irq_regs()->orig_ax;
2325 me = smp_processor_id();
2326 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
2327 cpumask_t cleanup_mask;
2329 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2330 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2331 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2332 cfg->move_in_progress = 0;
2336 static inline void irq_complete_move(unsigned int irq) {}
2338 #ifdef CONFIG_INTR_REMAP
2339 static void ack_x2apic_level(unsigned int irq)
2344 static void ack_x2apic_edge(unsigned int irq)
2350 static void ack_apic_edge(unsigned int irq)
2352 irq_complete_move(irq);
2353 move_native_irq(irq);
2357 atomic_t irq_mis_count;
2359 static void ack_apic_level(unsigned int irq)
2361 #ifdef CONFIG_X86_32
2365 int do_unmask_irq = 0;
2367 irq_complete_move(irq);
2368 #ifdef CONFIG_GENERIC_PENDING_IRQ
2369 /* If we are moving the irq we need to mask it */
2370 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
2372 mask_IO_APIC_irq(irq);
2376 #ifdef CONFIG_X86_32
2378 * It appears there is an erratum which affects at least version 0x11
2379 * of I/O APIC (that's the 82093AA and cores integrated into various
2380 * chipsets). Under certain conditions a level-triggered interrupt is
2381 * erroneously delivered as edge-triggered one but the respective IRR
2382 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2383 * message but it will never arrive and further interrupts are blocked
2384 * from the source. The exact reason is so far unknown, but the
2385 * phenomenon was observed when two consecutive interrupt requests
2386 * from a given source get delivered to the same CPU and the source is
2387 * temporarily disabled in between.
2389 * A workaround is to simulate an EOI message manually. We achieve it
2390 * by setting the trigger mode to edge and then to level when the edge
2391 * trigger mode gets detected in the TMR of a local APIC for a
2392 * level-triggered interrupt. We mask the source for the time of the
2393 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2394 * The idea is from Manfred Spraul. --macro
2396 i = irq_cfg(irq)->vector;
2398 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2402 * We must acknowledge the irq before we move it or the acknowledge will
2403 * not propagate properly.
	/* Now we can move and re-enable the irq */
2408 if (unlikely(do_unmask_irq)) {
2409 /* Only migrate the irq if the ack has been received.
2411 * On rare occasions the broadcast level triggered ack gets
2412 * delayed going to ioapics, and if we reprogram the
2413 * vector while Remote IRR is still set the irq will never
2416 * To prevent this scenario we read the Remote IRR bit
2417 * of the ioapic. This has two effects.
2418 * - On any sane system the read of the ioapic will
2419 * flush writes (and acks) going to the ioapic from
2421 * - We get to see if the ACK has actually been delivered.
2423 * Based on failed experiments of reprogramming the
2424 * ioapic entry from outside of irq context starting
2425 * with masking the ioapic entry and then polling until
2426 * Remote IRR was clear before reprogramming the
2427 * ioapic I don't trust the Remote IRR bit to be
	 * completely accurate.
2430 * However there appears to be no other way to plug
2431 * this race, so if the Remote IRR bit is not
2432 * accurate and is causing problems then it is a hardware bug
2433 * and you can go talk to the chipset vendor about it.
2435 if (!io_apic_level_ack_pending(irq))
2436 move_masked_irq(irq);
2437 unmask_IO_APIC_irq(irq);
2440 #ifdef CONFIG_X86_32
2441 if (!(v & (1 << (i & 0x1f)))) {
2442 atomic_inc(&irq_mis_count);
2443 spin_lock(&ioapic_lock);
2444 __mask_and_edge_IO_APIC_irq(irq);
2445 __unmask_and_level_IO_APIC_irq(irq);
2446 spin_unlock(&ioapic_lock);
2451 static struct irq_chip ioapic_chip __read_mostly = {
2453 .startup = startup_ioapic_irq,
2454 .mask = mask_IO_APIC_irq,
2455 .unmask = unmask_IO_APIC_irq,
2456 .ack = ack_apic_edge,
2457 .eoi = ack_apic_level,
2459 .set_affinity = set_ioapic_affinity_irq,
2461 .retrigger = ioapic_retrigger_irq,
2464 #ifdef CONFIG_INTR_REMAP
2465 static struct irq_chip ir_ioapic_chip __read_mostly = {
2466 .name = "IR-IO-APIC",
2467 .startup = startup_ioapic_irq,
2468 .mask = mask_IO_APIC_irq,
2469 .unmask = unmask_IO_APIC_irq,
2470 .ack = ack_x2apic_edge,
2471 .eoi = ack_x2apic_level,
2473 .set_affinity = set_ir_ioapic_affinity_irq,
2475 .retrigger = ioapic_retrigger_irq,
2479 static inline void init_IO_APIC_traps(void)
2482 struct irq_desc *desc;
2483 struct irq_cfg *cfg;
2486 * NOTE! The local APIC isn't very good at handling
2487 * multiple interrupts at the same interrupt level.
2488 * As the interrupt level is determined by taking the
2489 * vector number and shifting that right by 4, we
2490 * want to spread these out a bit so that they don't
2491 * all fall in the same interrupt level.
2493 * Also, we've got to be careful not to trash gate
2494 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2496 for_each_irq_desc(irq, desc) {
2500 cfg = desc->chip_data;
2501 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2503 * Hmm.. We don't have an entry for this,
2504 * so default to an old-fashioned 8259
2505 * interrupt if we can..
2507 if (irq < NR_IRQS_LEGACY)
2508 make_8259A_irq(irq);
2510 /* Strange. Oh, well.. */
2511 desc->chip = &no_irq_chip;
2517 * The local APIC irq-chip implementation:
2520 static void mask_lapic_irq(unsigned int irq)
2524 v = apic_read(APIC_LVT0);
2525 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2528 static void unmask_lapic_irq(unsigned int irq)
2532 v = apic_read(APIC_LVT0);
2533 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2536 static void ack_lapic_irq(unsigned int irq)
2541 static struct irq_chip lapic_chip __read_mostly = {
2542 .name = "local-APIC",
2543 .mask = mask_lapic_irq,
2544 .unmask = unmask_lapic_irq,
2545 .ack = ack_lapic_irq,
2548 static void lapic_register_intr(int irq)
2550 struct irq_desc *desc;
2552 desc = irq_to_desc(irq);
2553 desc->status &= ~IRQ_LEVEL;
2554 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2558 static void __init setup_nmi(void)
2561 * Dirty trick to enable the NMI watchdog ...
2562 * We put the 8259A master into AEOI mode and
2563 * unmask on all local APICs LVT0 as NMI.
2565 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2566 * is from Maciej W. Rozycki - so we do not have to EOI from
2567 * the NMI handler or the timer interrupt.
2569 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2571 enable_NMI_through_LVT0();
2573 apic_printk(APIC_VERBOSE, " done.\n");
2577 * This looks a bit hackish but it's about the only way of sending
2578 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2579 * not support the ExtINT mode, unfortunately. We need to send these
2580 * cycles as some i82489DX-based boards have glue logic that keeps the
2581 * 8259A interrupt line asserted until INTA. --macro
2583 static inline void __init unlock_ExtINT_logic(void)
2586 struct IO_APIC_route_entry entry0, entry1;
2587 unsigned char save_control, save_freq_select;
2589 pin = find_isa_irq_pin(8, mp_INT);
2594 apic = find_isa_irq_apic(8, mp_INT);
2600 entry0 = ioapic_read_entry(apic, pin);
2601 clear_IO_APIC_pin(apic, pin);
2603 memset(&entry1, 0, sizeof(entry1));
2605 entry1.dest_mode = 0; /* physical delivery */
2606 entry1.mask = 0; /* unmask IRQ now */
2607 entry1.dest = hard_smp_processor_id();
2608 entry1.delivery_mode = dest_ExtINT;
2609 entry1.polarity = entry0.polarity;
2613 ioapic_write_entry(apic, pin, entry1);
2615 save_control = CMOS_READ(RTC_CONTROL);
2616 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2617 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2619 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
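	/*
	 * With the periodic interrupt enabled and rate select 6 (nominally
	 * 1024 Hz on the MC146818), the RTC raises IRQ 8 almost at once;
	 * since that pin now carries the ExtINT entry written above, the
	 * delivery produces the INTA cycles towards the 8259A that this
	 * routine exists to generate.
	 */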
2624 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2628 CMOS_WRITE(save_control, RTC_CONTROL);
2629 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2630 clear_IO_APIC_pin(apic, pin);
2632 ioapic_write_entry(apic, pin, entry0);
2635 static int disable_timer_pin_1 __initdata;
2636 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2637 static int __init disable_timer_pin_setup(char *arg)
2639 disable_timer_pin_1 = 1;
2642 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2644 int timer_through_8259 __initdata;
2647 * This code may look a bit paranoid, but it's supposed to cooperate with
2648 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2649 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2650 * fanatically on his truly buggy board.
2652 * FIXME: really need to revamp this for all platforms.
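 *
 * The code below is a fallback ladder: IRQ0 through the IO-APIC pin
 * reported by the MP table, then through the 8259A cascade pin, then
 * the local APIC as a virtual-wire interrupt, then ExtINT delivery,
 * panicking only if every variant fails to produce timer ticks.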
2654 static inline void __init check_timer(void)
2656 struct irq_cfg *cfg = irq_cfg(0);
2657 int apic1, pin1, apic2, pin2;
2658 unsigned long flags;
2662 local_irq_save(flags);
2664 ver = apic_read(APIC_LVR);
2665 ver = GET_APIC_VERSION(ver);
2668 * get/set the timer IRQ vector:
2670 disable_8259A_irq(0);
2671 assign_irq_vector(0, TARGET_CPUS);
2674 * As IRQ0 is to be enabled in the 8259A, the virtual
2675 * wire has to be disabled in the local APIC. Also
2676 * timer interrupts need to be acknowledged manually in
2677 * the 8259A for the i82489DX when using the NMI
2678 * watchdog as that APIC treats NMIs as level-triggered.
2679 * The AEOI mode will finish them in the 8259A
2682 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2684 #ifdef CONFIG_X86_32
2685 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2688 pin1 = find_isa_irq_pin(0, mp_INT);
2689 apic1 = find_isa_irq_apic(0, mp_INT);
2690 pin2 = ioapic_i8259.pin;
2691 apic2 = ioapic_i8259.apic;
2693 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2694 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2695 cfg->vector, apic1, pin1, apic2, pin2);
2698 * Some BIOS writers are clueless and report the ExtINTA
2699 * I/O APIC input from the cascaded 8259A as the timer
2700 * interrupt input. So just in case, if only one pin
2701 * was found above, try it both directly and through the 8259A.
2705 #ifdef CONFIG_INTR_REMAP
2706 if (intr_remapping_enabled)
2707 panic("BIOS bug: timer not connected to IO-APIC");
2712 } else if (pin2 == -1) {
2719 * Ok, does IRQ0 through the IOAPIC work?
2722 add_pin_to_irq_cpu(0, boot_cpu_id, apic1, pin1);
2723 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2725 unmask_IO_APIC_irq(0);
2726 if (timer_irq_works()) {
2727 if (nmi_watchdog == NMI_IO_APIC) {
2729 enable_8259A_irq(0);
2731 if (disable_timer_pin_1 > 0)
2732 clear_IO_APIC_pin(0, pin1);
2735 #ifdef CONFIG_INTR_REMAP
2736 if (intr_remapping_enabled)
2737 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2739 clear_IO_APIC_pin(apic1, pin1);
2741 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2742 "8254 timer not connected to IO-APIC\n");
2744 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2745 "(IRQ0) through the 8259A ...\n");
2746 apic_printk(APIC_QUIET, KERN_INFO
2747 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2749 * legacy devices should be connected to IO APIC #0
2751 replace_pin_at_irq(0, boot_cpu_id, apic1, pin1, apic2, pin2);
2752 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2753 unmask_IO_APIC_irq(0);
2754 enable_8259A_irq(0);
2755 if (timer_irq_works()) {
2756 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2757 timer_through_8259 = 1;
2758 if (nmi_watchdog == NMI_IO_APIC) {
2759 disable_8259A_irq(0);
2761 enable_8259A_irq(0);
2766 * Cleanup, just in case ...
2768 disable_8259A_irq(0);
2769 clear_IO_APIC_pin(apic2, pin2);
2770 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2773 if (nmi_watchdog == NMI_IO_APIC) {
2774 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2775 "through the IO-APIC - disabling NMI Watchdog!\n");
2776 nmi_watchdog = NMI_NONE;
2778 #ifdef CONFIG_X86_32
2782 apic_printk(APIC_QUIET, KERN_INFO
2783 "...trying to set up timer as Virtual Wire IRQ...\n");
2785 lapic_register_intr(0);
2786 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2787 enable_8259A_irq(0);
2789 if (timer_irq_works()) {
2790 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2793 disable_8259A_irq(0);
2794 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2795 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2797 apic_printk(APIC_QUIET, KERN_INFO
2798 "...trying to set up timer as ExtINT IRQ...\n");
2802 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2804 unlock_ExtINT_logic();
2806 if (timer_irq_works()) {
2807 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2810 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2811 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2812 "report. Then try booting with the 'noapic' option.\n");
2814 local_irq_restore(flags);
2818 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2819 * to devices. However there may be an I/O APIC pin available for
2820 * this interrupt regardless. The pin may be left unconnected, but
2821 * typically it will be reused as an ExtINT cascade interrupt for
2822 * the master 8259A. In the MPS case such a pin will normally be
2823 * reported as an ExtINT interrupt in the MP table. With ACPI
2824 * there is no provision for ExtINT interrupts, and in the absence
2825 * of an override it would be treated as an ordinary ISA I/O APIC
2826 * interrupt, that is edge-triggered and unmasked by default. We
2827 * used to do this, but it caused problems on some systems because
2828 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2829 * the same ExtINT cascade interrupt to drive the local APIC of the
2830 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2831 * the I/O APIC in all cases now. No actual device should request
2832 * it anyway. --macro
2834 #define PIC_IRQS (1 << PIC_CASCADE_IR)
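/*
 * PIC_CASCADE_IR is 2, so PIC_IRQS covers only the cascade line
 * (PIC_IRQS == 0x4) and io_apic_irqs = ~PIC_IRQS below routes every
 * IRQ except IRQ2 through the IO-APIC, matching the policy described
 * above.
 */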
2836 void __init setup_IO_APIC(void)
2839 #ifdef CONFIG_X86_32
2843 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
2847 io_apic_irqs = ~PIC_IRQS;
2849 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2851 * Set up IO-APIC IRQ routing.
2853 #ifdef CONFIG_X86_32
2855 setup_ioapic_ids_from_mpc();
2858 setup_IO_APIC_irqs();
2859 init_IO_APIC_traps();
2864 * Called after all the initialization is done. If we didn't find any
2865 * APIC bugs then we can allow the modify fast path
2868 static int __init io_apic_bug_finalize(void)
2870 if (sis_apic_bug == -1)
2875 late_initcall(io_apic_bug_finalize);
2877 struct sysfs_ioapic_data {
2878 struct sys_device dev;
2879 struct IO_APIC_route_entry entry[0];
2881 static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
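/*
 * entry[0] is a zero-length trailing array: ioapic_init_sysfs() below
 * sizes each sysfs_ioapic_data to hold that IO-APIC's full set of
 * redirection entries, which suspend and resume then snapshot and
 * restore in place.
 */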
2883 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2885 struct IO_APIC_route_entry *entry;
2886 struct sysfs_ioapic_data *data;
2889 data = container_of(dev, struct sysfs_ioapic_data, dev);
2890 entry = data->entry;
2891	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2892 *entry = ioapic_read_entry(dev->id, i);
2897 static int ioapic_resume(struct sys_device *dev)
2899 struct IO_APIC_route_entry *entry;
2900 struct sysfs_ioapic_data *data;
2901 unsigned long flags;
2902 union IO_APIC_reg_00 reg_00;
2905 data = container_of(dev, struct sysfs_ioapic_data, dev);
2906 entry = data->entry;
2908 spin_lock_irqsave(&ioapic_lock, flags);
2909 reg_00.raw = io_apic_read(dev->id, 0);
2910 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2911 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
2912 io_apic_write(dev->id, 0, reg_00.raw);
2914 spin_unlock_irqrestore(&ioapic_lock, flags);
2915 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2916 ioapic_write_entry(dev->id, i, entry[i]);
2921 static struct sysdev_class ioapic_sysdev_class = {
2923 .suspend = ioapic_suspend,
2924 .resume = ioapic_resume,
2927 static int __init ioapic_init_sysfs(void)
2929	struct sys_device *dev;
2932 error = sysdev_class_register(&ioapic_sysdev_class);
2936	for (i = 0; i < nr_ioapics; i++) {
2937 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2938 * sizeof(struct IO_APIC_route_entry);
2939 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
2940 if (!mp_ioapic_data[i]) {
2941 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2944 dev = &mp_ioapic_data[i]->dev;
2946 dev->cls = &ioapic_sysdev_class;
2947 error = sysdev_register(dev);
2949 kfree(mp_ioapic_data[i]);
2950 mp_ioapic_data[i] = NULL;
2951 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2959 device_initcall(ioapic_init_sysfs);
2962 * Dynamic irq allocation and deallocation
2964 unsigned int create_irq_nr(unsigned int irq_want)
2966 /* Allocate an unused irq */
2969 unsigned long flags;
2970 struct irq_cfg *cfg_new = NULL;
2971 int cpu = boot_cpu_id;
2972 struct irq_desc *desc_new = NULL;
2975 spin_lock_irqsave(&vector_lock, flags);
2976 for (new = irq_want; new < NR_IRQS; new++) {
2977 if (platform_legacy_irq(new))
2980 desc_new = irq_to_desc_alloc_cpu(new, cpu);
2982			printk(KERN_INFO "cannot get irq_desc for %d\n", new);
2985 cfg_new = desc_new->chip_data;
2987 if (cfg_new->vector != 0)
2989 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
2993 spin_unlock_irqrestore(&vector_lock, flags);
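	/*
	 * The loop above scans upwards from irq_want, skipping the legacy
	 * 8259A range, and stops at the first irq whose descriptor can be
	 * allocated, whose cfg->vector is still unassigned and for which
	 * __assign_irq_vector() succeeds; that irq is then initialized and
	 * handed back to the caller.
	 */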
2996 dynamic_irq_init(irq);
2997	/* restore it, in case dynamic_irq_init cleared it */
2999 desc_new->chip_data = cfg_new;
3004 static int nr_irqs_gsi = NR_IRQS_LEGACY;
3005 int create_irq(void)
3007 unsigned int irq_want;
3010 irq_want = nr_irqs_gsi;
3011 irq = create_irq_nr(irq_want);
3019 void destroy_irq(unsigned int irq)
3021 unsigned long flags;
3022 struct irq_cfg *cfg;
3023 struct irq_desc *desc;
3025	/* store it, in case dynamic_irq_cleanup cleared it */
3026 desc = irq_to_desc(irq);
3027 cfg = desc->chip_data;
3028 dynamic_irq_cleanup(irq);
3029 /* connect back irq_cfg */
3031 desc->chip_data = cfg;
3033 #ifdef CONFIG_INTR_REMAP
3036 spin_lock_irqsave(&vector_lock, flags);
3037 __clear_irq_vector(irq);
3038 spin_unlock_irqrestore(&vector_lock, flags);
3042 * MSI message composition
3044 #ifdef CONFIG_PCI_MSI
3045 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
3047 struct irq_cfg *cfg;
3053 err = assign_irq_vector(irq, tmp);
3058 cpus_and(tmp, cfg->domain, tmp);
3059 dest = cpu_mask_to_apicid(tmp);
3061 #ifdef CONFIG_INTR_REMAP
3062 if (irq_remapped(irq)) {
3067 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3068 BUG_ON(ir_index == -1);
3070	memset(&irte, 0, sizeof(irte));
3073 irte.dst_mode = INT_DEST_MODE;
3074 irte.trigger_mode = 0; /* edge */
3075 irte.dlvry_mode = INT_DELIVERY_MODE;
3076 irte.vector = cfg->vector;
3077 irte.dest_id = IRTE_DEST(dest);
3079 modify_irte(irq, &irte);
3081 msg->address_hi = MSI_ADDR_BASE_HI;
3082 msg->data = sub_handle;
3083 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3085 MSI_ADDR_IR_INDEX1(ir_index) |
3086 MSI_ADDR_IR_INDEX2(ir_index);
3090 msg->address_hi = MSI_ADDR_BASE_HI;
3093 ((INT_DEST_MODE == 0) ?
3094 MSI_ADDR_DEST_MODE_PHYSICAL:
3095 MSI_ADDR_DEST_MODE_LOGICAL) |
3096 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3097 MSI_ADDR_REDIRECTION_CPU:
3098 MSI_ADDR_REDIRECTION_LOWPRI) |
3099 MSI_ADDR_DEST_ID(dest);
3102 MSI_DATA_TRIGGER_EDGE |
3103 MSI_DATA_LEVEL_ASSERT |
3104 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3105 MSI_DATA_DELIVERY_FIXED:
3106 MSI_DATA_DELIVERY_LOWPRI) |
3107 MSI_DATA_VECTOR(cfg->vector);
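	/*
	 * In the non-remapped case the composed message follows the x86 MSI
	 * layout: address_lo carries the destination APIC ID plus the
	 * destination-mode and redirection-hint bits, while data carries
	 * edge trigger, assert level, the delivery mode and the vector.
	 */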
3113 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3115 struct irq_cfg *cfg;
3119 struct irq_desc *desc;
3121 cpus_and(tmp, mask, cpu_online_map);
3122 if (cpus_empty(tmp))
3125 if (assign_irq_vector(irq, mask))
3129 cpus_and(tmp, cfg->domain, mask);
3130 dest = cpu_mask_to_apicid(tmp);
3132 read_msi_msg(irq, &msg);
3134 msg.data &= ~MSI_DATA_VECTOR_MASK;
3135 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3136 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3137 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3139 write_msi_msg(irq, &msg);
3140 desc = irq_to_desc(irq);
3141 desc->affinity = mask;
3144 #ifdef CONFIG_INTR_REMAP
3146 * Migrate the MSI irq to another cpumask. This migration is
3147 * done in the process context using interrupt-remapping hardware.
3149 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3151 struct irq_cfg *cfg;
3153 cpumask_t tmp, cleanup_mask;
3155 struct irq_desc *desc;
3157 cpus_and(tmp, mask, cpu_online_map);
3158 if (cpus_empty(tmp))
3161 if (get_irte(irq, &irte))
3164 if (assign_irq_vector(irq, mask))
3168 cpus_and(tmp, cfg->domain, mask);
3169 dest = cpu_mask_to_apicid(tmp);
3171 irte.vector = cfg->vector;
3172 irte.dest_id = IRTE_DEST(dest);
3175 * atomically update the IRTE with the new destination and vector.
3177 modify_irte(irq, &irte);
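	/*
	 * With interrupt remapping the device-visible MSI message is left
	 * untouched: it only names the IRTE, so rewriting the vector and
	 * destination in the table entry is enough to retarget the irq.
	 */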
3180 * After this point, all the interrupts will start arriving
3181	 * at the new destination. So, time to clean up the previous
3182 * vector allocation.
3184 if (cfg->move_in_progress) {
3185 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
3186 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3187 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3188 cfg->move_in_progress = 0;
3191 desc = irq_to_desc(irq);
3192 desc->affinity = mask;
3195 #endif /* CONFIG_SMP */
3198 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3199 * which implement the MSI or MSI-X Capability Structure.
3201 static struct irq_chip msi_chip = {
3203 .unmask = unmask_msi_irq,
3204 .mask = mask_msi_irq,
3205 .ack = ack_apic_edge,
3207 .set_affinity = set_msi_irq_affinity,
3209 .retrigger = ioapic_retrigger_irq,
3212 #ifdef CONFIG_INTR_REMAP
3213 static struct irq_chip msi_ir_chip = {
3214 .name = "IR-PCI-MSI",
3215 .unmask = unmask_msi_irq,
3216 .mask = mask_msi_irq,
3217 .ack = ack_x2apic_edge,
3219 .set_affinity = ir_set_msi_irq_affinity,
3221 .retrigger = ioapic_retrigger_irq,
3225 * Map the PCI dev to the corresponding remapping hardware unit
3226 * and allocate 'nvec' consecutive interrupt-remapping table entries
3229 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3231 struct intel_iommu *iommu;
3234 iommu = map_dev_to_ir(dev);
3237 "Unable to map PCI %s to iommu\n", pci_name(dev));
3241 index = alloc_irte(iommu, irq, nvec);
3244 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3252 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
3257 ret = msi_compose_msg(dev, irq, &msg);
3261 set_irq_msi(irq, desc);
3262 write_msi_msg(irq, &msg);
3264 #ifdef CONFIG_INTR_REMAP
3265 if (irq_remapped(irq)) {
3266 struct irq_desc *desc = irq_to_desc(irq);
3268 * irq migration in process context
3270 desc->status |= IRQ_MOVE_PCNTXT;
3271 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3274 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3276 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
3281 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
3285 unsigned int irq_want;
3287 irq_want = nr_irqs_gsi;
3288 irq = create_irq_nr(irq_want);
3292 #ifdef CONFIG_INTR_REMAP
3293 if (!intr_remapping_enabled)
3296 ret = msi_alloc_irte(dev, irq, 1);
3301 ret = setup_msi_irq(dev, msidesc, irq);
3308 #ifdef CONFIG_INTR_REMAP
3315 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3318 int ret, sub_handle;
3319 struct msi_desc *msidesc;
3320 unsigned int irq_want;
3322 #ifdef CONFIG_INTR_REMAP
3323	struct intel_iommu *iommu = NULL;
3327 irq_want = nr_irqs_gsi;
3329 list_for_each_entry(msidesc, &dev->msi_list, list) {
3330 irq = create_irq_nr(irq_want);
3334 #ifdef CONFIG_INTR_REMAP
3335 if (!intr_remapping_enabled)
3340		 * allocate the consecutive block of IRTEs
3343 index = msi_alloc_irte(dev, irq, nvec);
3349 iommu = map_dev_to_ir(dev);
3355			 * set up the mapping between the irq and the IRTE
3356			 * base index, with the sub_handle pointing to the
3357			 * appropriate interrupt remapping table entry.
3359 set_irte_irq(irq, iommu, index, sub_handle);
3363 ret = setup_msi_irq(dev, msidesc, irq);
3375 void arch_teardown_msi_irq(unsigned int irq)
3382 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3384 struct irq_cfg *cfg;
3388 struct irq_desc *desc;
3390 cpus_and(tmp, mask, cpu_online_map);
3391 if (cpus_empty(tmp))
3394 if (assign_irq_vector(irq, mask))
3398 cpus_and(tmp, cfg->domain, mask);
3399 dest = cpu_mask_to_apicid(tmp);
3401 dmar_msi_read(irq, &msg);
3403 msg.data &= ~MSI_DATA_VECTOR_MASK;
3404 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3405 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3406 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3408 dmar_msi_write(irq, &msg);
3409 desc = irq_to_desc(irq);
3410 desc->affinity = mask;
3412 #endif /* CONFIG_SMP */
3414 struct irq_chip dmar_msi_type = {
3416 .unmask = dmar_msi_unmask,
3417 .mask = dmar_msi_mask,
3418 .ack = ack_apic_edge,
3420 .set_affinity = dmar_msi_set_affinity,
3422 .retrigger = ioapic_retrigger_irq,
3425 int arch_setup_dmar_msi(unsigned int irq)
3430 ret = msi_compose_msg(NULL, irq, &msg);
3433 dmar_msi_write(irq, &msg);
3434 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3440 #ifdef CONFIG_HPET_TIMER
3443 static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3445 struct irq_cfg *cfg;
3446 struct irq_desc *desc;
3451 cpus_and(tmp, mask, cpu_online_map);
3452 if (cpus_empty(tmp))
3455 if (assign_irq_vector(irq, mask))
3459 cpus_and(tmp, cfg->domain, mask);
3460 dest = cpu_mask_to_apicid(tmp);
3462 hpet_msi_read(irq, &msg);
3464 msg.data &= ~MSI_DATA_VECTOR_MASK;
3465 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3466 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3467 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3469 hpet_msi_write(irq, &msg);
3470 desc = irq_to_desc(irq);
3471 desc->affinity = mask;
3473 #endif /* CONFIG_SMP */
3475 struct irq_chip hpet_msi_type = {
3477 .unmask = hpet_msi_unmask,
3478 .mask = hpet_msi_mask,
3479 .ack = ack_apic_edge,
3481 .set_affinity = hpet_msi_set_affinity,
3483 .retrigger = ioapic_retrigger_irq,
3486 int arch_setup_hpet_msi(unsigned int irq)
3491 ret = msi_compose_msg(NULL, irq, &msg);
3495 hpet_msi_write(irq, &msg);
3496 set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
3503 #endif /* CONFIG_PCI_MSI */
3505 * Hypertransport interrupt support
3507 #ifdef CONFIG_HT_IRQ
3511 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3513 struct ht_irq_msg msg;
3514 fetch_ht_irq_msg(irq, &msg);
3516 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3517 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3519 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3520 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3522 write_ht_irq_msg(irq, &msg);
3525 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
3527 struct irq_cfg *cfg;
3530 struct irq_desc *desc;
3532 cpus_and(tmp, mask, cpu_online_map);
3533 if (cpus_empty(tmp))
3536 if (assign_irq_vector(irq, mask))
3540 cpus_and(tmp, cfg->domain, mask);
3541 dest = cpu_mask_to_apicid(tmp);
3543 target_ht_irq(irq, dest, cfg->vector);
3544 desc = irq_to_desc(irq);
3545 desc->affinity = mask;
3549 static struct irq_chip ht_irq_chip = {
3551 .mask = mask_ht_irq,
3552 .unmask = unmask_ht_irq,
3553 .ack = ack_apic_edge,
3555 .set_affinity = set_ht_irq_affinity,
3557 .retrigger = ioapic_retrigger_irq,
3560 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3562 struct irq_cfg *cfg;
3567 err = assign_irq_vector(irq, tmp);
3569 struct ht_irq_msg msg;
3573 cpus_and(tmp, cfg->domain, tmp);
3574 dest = cpu_mask_to_apicid(tmp);
3576 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3580 HT_IRQ_LOW_DEST_ID(dest) |
3581 HT_IRQ_LOW_VECTOR(cfg->vector) |
3582 ((INT_DEST_MODE == 0) ?
3583 HT_IRQ_LOW_DM_PHYSICAL :
3584 HT_IRQ_LOW_DM_LOGICAL) |
3585 HT_IRQ_LOW_RQEOI_EDGE |
3586 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3587 HT_IRQ_LOW_MT_FIXED :
3588 HT_IRQ_LOW_MT_ARBITRATED) |
3589 HT_IRQ_LOW_IRQ_MASKED;
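	/*
	 * HT_IRQ_LOW_IRQ_MASKED composes the interrupt masked; it is
	 * expected to be unmasked later through ht_irq_chip's unmask hook
	 * once the irq has been requested and started up.
	 */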
3591 write_ht_irq_msg(irq, &msg);
3593 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3594 handle_edge_irq, "edge");
3596 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3600 #endif /* CONFIG_HT_IRQ */
3602 #ifdef CONFIG_X86_64
3604 * Re-target the irq to the specified CPU and enable the specified MMR located
3605 * on the specified blade to allow the sending of MSIs to the specified CPU.
3607 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
3608 unsigned long mmr_offset)
3610 const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
3611 struct irq_cfg *cfg;
3613 unsigned long mmr_value;
3614 struct uv_IO_APIC_route_entry *entry;
3615 unsigned long flags;
3618 err = assign_irq_vector(irq, *eligible_cpu);
3622 spin_lock_irqsave(&vector_lock, flags);
3623 set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
3625 spin_unlock_irqrestore(&vector_lock, flags);
3630 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3631 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3633 entry->vector = cfg->vector;
3634 entry->delivery_mode = INT_DELIVERY_MODE;
3635 entry->dest_mode = INT_DEST_MODE;
3636 entry->polarity = 0;
3639 entry->dest = cpu_mask_to_apicid(*eligible_cpu);
3641 mmr_pnode = uv_blade_to_pnode(mmr_blade);
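	/*
	 * The routing entry overlays mmr_value (the BUG_ON above enforces
	 * the size), so vector, delivery mode, destination mode and
	 * destination are all programmed with the single 64-bit MMR write
	 * below.
	 */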
3642 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3648 * Disable the specified MMR located on the specified blade so that MSIs are
3649 * no longer allowed to be sent.
3651 void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
3653 unsigned long mmr_value;
3654 struct uv_IO_APIC_route_entry *entry;
3658 entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
3659 BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
3663 mmr_pnode = uv_blade_to_pnode(mmr_blade);
3664 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
3666 #endif /* CONFIG_X86_64 */
3668 int __init io_apic_get_redir_entries(int ioapic)
3670 union IO_APIC_reg_01 reg_01;
3671 unsigned long flags;
3673 spin_lock_irqsave(&ioapic_lock, flags);
3674 reg_01.raw = io_apic_read(ioapic, 1);
3675 spin_unlock_irqrestore(&ioapic_lock, flags);
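	/*
	 * Register 0x01 reports the highest redirection entry index, not a
	 * count, which is why probe_nr_irqs_gsi() below adds one per IO-APIC
	 * when sizing the GSI space (entries == 23 means 24 pins).
	 */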
3677 return reg_01.bits.entries;
3680 void __init probe_nr_irqs_gsi(void)
3685 for (idx = 0; idx < nr_ioapics; idx++)
3686 nr += io_apic_get_redir_entries(idx) + 1;
3688 if (nr > nr_irqs_gsi)
3692 /* --------------------------------------------------------------------------
3693 ACPI-based IOAPIC Configuration
3694 -------------------------------------------------------------------------- */
3698 #ifdef CONFIG_X86_32
3699 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3701 union IO_APIC_reg_00 reg_00;
3702 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3704 unsigned long flags;
3708 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3709 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3710 * support up to 16 on one shared APIC bus.
3712 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3713 * advantage of new APIC bus architecture.
3716 if (physids_empty(apic_id_map))
3717 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
3719 spin_lock_irqsave(&ioapic_lock, flags);
3720 reg_00.raw = io_apic_read(ioapic, 0);
3721 spin_unlock_irqrestore(&ioapic_lock, flags);
3723 if (apic_id >= get_physical_broadcast()) {
3724 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3725 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3726 apic_id = reg_00.bits.ID;
3730 * Every APIC in a system must have a unique ID or we get lots of nice
3731 * 'stuck on smp_invalidate_needed IPI wait' messages.
3733 if (check_apicid_used(apic_id_map, apic_id)) {
3735 for (i = 0; i < get_physical_broadcast(); i++) {
3736 if (!check_apicid_used(apic_id_map, i))
3740 if (i == get_physical_broadcast())
3741 panic("Max apic_id exceeded!\n");
3743 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3744 "trying %d\n", ioapic, apic_id, i);
3749 tmp = apicid_to_cpu_present(apic_id);
3750 physids_or(apic_id_map, apic_id_map, tmp);
3752 if (reg_00.bits.ID != apic_id) {
3753 reg_00.bits.ID = apic_id;
3755 spin_lock_irqsave(&ioapic_lock, flags);
3756 io_apic_write(ioapic, 0, reg_00.raw);
3757 reg_00.raw = io_apic_read(ioapic, 0);
3758 spin_unlock_irqrestore(&ioapic_lock, flags);
3761 if (reg_00.bits.ID != apic_id) {
3762 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3767 apic_printk(APIC_VERBOSE, KERN_INFO
3768 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3773 int __init io_apic_get_version(int ioapic)
3775 union IO_APIC_reg_01 reg_01;
3776 unsigned long flags;
3778 spin_lock_irqsave(&ioapic_lock, flags);
3779 reg_01.raw = io_apic_read(ioapic, 1);
3780 spin_unlock_irqrestore(&ioapic_lock, flags);
3782 return reg_01.bits.version;
3786 int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
3788 struct irq_desc *desc;
3789 struct irq_cfg *cfg;
3790 int cpu = boot_cpu_id;
3792 if (!IO_APIC_IRQ(irq)) {
3793		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3798 desc = irq_to_desc_alloc_cpu(irq, cpu);
3800		printk(KERN_INFO "cannot get irq_desc %d\n", irq);
3805 * IRQs < 16 are already in the irq_2_pin[] map
3807 if (irq >= NR_IRQS_LEGACY) {
3808 cfg = desc->chip_data;
3809 add_pin_to_irq_cpu(irq, cpu, ioapic, pin);
3812 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
3818 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3822 if (skip_ioapic_setup)
3825 for (i = 0; i < mp_irq_entries; i++)
3826 if (mp_irqs[i].mp_irqtype == mp_INT &&
3827 mp_irqs[i].mp_srcbusirq == bus_irq)
3829 if (i >= mp_irq_entries)
3832 *trigger = irq_trigger(i);
3833 *polarity = irq_polarity(i);
3837 #endif /* CONFIG_ACPI */
3840 * This function is currently only a helper for the i386 SMP boot process, where
3841 * we need to reprogram the ioredtbls to cater for the cpus which have come online,
3842 * so the mask in all cases should simply be TARGET_CPUS
3845 void __init setup_ioapic_dest(void)
3847 int pin, ioapic, irq, irq_entry;
3848 struct irq_desc *desc;
3849 struct irq_cfg *cfg;
3852 if (skip_ioapic_setup == 1)
3855 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3856 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3857 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3858 if (irq_entry == -1)
3860 irq = pin_2_irq(irq_entry, ioapic, pin);
3862 /* setup_IO_APIC_irqs could fail to get vector for some device
3863	 * when you have too many devices, because at that time only the boot cpu is online.
3866 desc = irq_to_desc(irq);
3867 cfg = desc->chip_data;
3869 setup_IO_APIC_irq(ioapic, pin, irq,
3870 irq_trigger(irq_entry),
3871 irq_polarity(irq_entry));
3877 * Honour affinities which have been set in early boot
3880 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
3881 mask = desc->affinity;
3885 #ifdef CONFIG_INTR_REMAP
3886 if (intr_remapping_enabled)
3887 set_ir_ioapic_affinity_irq(irq, mask);
3890 set_ioapic_affinity_irq(irq, mask);
3897 #define IOAPIC_RESOURCE_NAME_SIZE 11
3899 static struct resource *ioapic_resources;
3901 static struct resource * __init ioapic_setup_resources(void)
3904 struct resource *res;
3908 if (nr_ioapics <= 0)
3911 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3914 mem = alloc_bootmem(n);
3918 mem += sizeof(struct resource) * nr_ioapics;
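	/*
	 * A single bootmem block holds the resource array followed by the
	 * name strings; mem now points at the string area, and each pass of
	 * the loop below carves IOAPIC_RESOURCE_NAME_SIZE bytes out of it
	 * for one "IOAPIC %u" name.
	 */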
3920 for (i = 0; i < nr_ioapics; i++) {
3922 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3923 sprintf(mem, "IOAPIC %u", i);
3924 mem += IOAPIC_RESOURCE_NAME_SIZE;
3928 ioapic_resources = res;
3933 void __init ioapic_init_mappings(void)
3935 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3936 struct resource *ioapic_res;
3939 ioapic_res = ioapic_setup_resources();
3940 for (i = 0; i < nr_ioapics; i++) {
3941 if (smp_found_config) {
3942 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3943 #ifdef CONFIG_X86_32
3946 "WARNING: bogus zero IO-APIC "
3947 "address found in MPTABLE, "
3948 "disabling IO/APIC support!\n");
3949 smp_found_config = 0;
3950 skip_ioapic_setup = 1;
3951 goto fake_ioapic_page;
3955 #ifdef CONFIG_X86_32
3958 ioapic_phys = (unsigned long)
3959 alloc_bootmem_pages(PAGE_SIZE);
3960 ioapic_phys = __pa(ioapic_phys);
3962 set_fixmap_nocache(idx, ioapic_phys);
3963 apic_printk(APIC_VERBOSE,
3964 "mapped IOAPIC to %08lx (%08lx)\n",
3965 __fix_to_virt(idx), ioapic_phys);
3968 if (ioapic_res != NULL) {
3969 ioapic_res->start = ioapic_phys;
3970 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
3976 static int __init ioapic_insert_resources(void)
3979 struct resource *r = ioapic_resources;
3983			"IO APIC resources could not be allocated.\n");
3987 for (i = 0; i < nr_ioapics; i++) {
3988 insert_resource(&iomem_resource, r);
3995 /* Insert the IO APIC resources after PCI initialization has occurred to handle
3996  * IO APICs that are mapped in on a BAR in PCI space. */
3997 late_initcall(ioapic_insert_resources);