Merge branch 'irq-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 4ee270d30358a894a3bbd70c432766d199d230f5..9043251210fba4dc32ea7db009a1db6196216453 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -58,6 +58,8 @@
 #include <asm/setup.h>
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_irq.h>
 
 #include <mach_ipi.h>
 #include <mach_apic.h>
@@ -105,13 +107,9 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-struct irq_cfg;
 struct irq_pin_list;
 struct irq_cfg {
        unsigned int irq;
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-       struct irq_cfg *next;
-#endif
        struct irq_pin_list *irq_2_pin;
        cpumask_t domain;
        cpumask_t old_domain;
@@ -121,7 +119,7 @@ struct irq_cfg {
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
-static struct irq_cfg irq_cfg_legacy[] __initdata = {
+static struct irq_cfg irq_cfgx[NR_IRQS] = {
        [0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
        [1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
        [2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
@@ -140,165 +138,26 @@ static struct irq_cfg irq_cfg_legacy[] __initdata = {
        [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
 };
 
-static struct irq_cfg irq_cfg_init = { .irq =  -1U, };
-
-static void init_one_irq_cfg(struct irq_cfg *cfg)
-{
-       memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
-}
-
-static struct irq_cfg *irq_cfgx;
-
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-/*
- * Protect the irq_cfgx_free freelist:
- */
-static DEFINE_SPINLOCK(irq_cfg_lock);
-
-static struct irq_cfg *irq_cfgx_free;
-#endif
-
-static void __init init_work(void *data)
-{
-       struct dyn_array *da = data;
-       struct irq_cfg *cfg;
-       int legacy_count;
-       int i;
-
-       cfg = *da->name;
-
-       memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
-
-       legacy_count = ARRAY_SIZE(irq_cfg_legacy);
-       for (i = legacy_count; i < *da->nr; i++)
-               init_one_irq_cfg(&cfg[i]);
-
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-       for (i = 1; i < *da->nr; i++)
-               cfg[i-1].next = &cfg[i];
-
-       irq_cfgx_free = &irq_cfgx[legacy_count];
-       irq_cfgx[legacy_count - 1].next = NULL;
-#endif
-}
-
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-/* need to be biger than size of irq_cfg_legacy */
-static int nr_irq_cfg = 32;
-
-static int __init parse_nr_irq_cfg(char *arg)
-{
-       if (arg) {
-               nr_irq_cfg = simple_strtoul(arg, NULL, 0);
-               if (nr_irq_cfg < 32)
-                       nr_irq_cfg = 32;
-       }
-       return 0;
-}
-
-early_param("nr_irq_cfg", parse_nr_irq_cfg);
-
-#define for_each_irq_cfg(irqX, cfg)           \
-        for (cfg = irq_cfgx, irqX = cfg->irq; cfg; cfg = cfg->next, irqX = cfg ? cfg->irq : -1U)
-
-
-DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
+#define for_each_irq_cfg(irq, cfg)             \
+       for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
 
 static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-       struct irq_cfg *cfg;
-
-       cfg = irq_cfgx;
-       while (cfg) {
-               if (cfg->irq == irq)
-                       return cfg;
-
-               cfg = cfg->next;
-       }
-
-       return NULL;
+       return irq < nr_irqs ? irq_cfgx + irq : NULL;
 }
 
 static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
 {
-       struct irq_cfg *cfg, *cfg_pri;
-       unsigned long flags;
-       int count = 0;
-       int i;
-
-       cfg_pri = cfg = irq_cfgx;
-       while (cfg) {
-               if (cfg->irq == irq)
-                       return cfg;
-
-               cfg_pri = cfg;
-               cfg = cfg->next;
-               count++;
-       }
-
-       spin_lock_irqsave(&irq_cfg_lock, flags);
-       if (!irq_cfgx_free) {
-               unsigned long phys;
-               unsigned long total_bytes;
-               /*
-                *  we run out of pre-allocate ones, allocate more
-                */
-               printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);
-
-               total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
-               if (after_bootmem)
-                       cfg = kzalloc(total_bytes, GFP_ATOMIC);
-               else
-                       cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
-
-               if (!cfg)
-                       panic("please boot with nr_irq_cfg= %d\n", count * 2);
-
-               phys = __pa(cfg);
-               printk(KERN_DEBUG "irq_irq ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
-
-               for (i = 0; i < nr_irq_cfg; i++)
-                       init_one_irq_cfg(&cfg[i]);
-
-               for (i = 1; i < nr_irq_cfg; i++)
-                       cfg[i-1].next = &cfg[i];
-
-               irq_cfgx_free = cfg;
-       }
-
-       cfg = irq_cfgx_free;
-       irq_cfgx_free = irq_cfgx_free->next;
-       cfg->next = NULL;
-       if (cfg_pri)
-               cfg_pri->next = cfg;
-       else
-               irq_cfgx = cfg;
-       cfg->irq = irq;
-
-       spin_unlock_irqrestore(&irq_cfg_lock, flags);
-
-       return cfg;
+       return irq_cfg(irq);
 }
-#else
-
-#define for_each_irq_cfg(irq, cfg)             \
-       for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
 
-DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);
-
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
-        if (irq < nr_irqs)
-                return &irq_cfgx[irq];
-
-        return NULL;
-}
-struct irq_cfg *irq_cfg_alloc(unsigned int irq)
-{
-        return irq_cfg(irq);
-}
+/*
+ * Rough estimation of how many shared IRQs there are, can be changed
+ * anytime.
+ */
+#define MAX_PLUS_SHARED_IRQS NR_IRQS
+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
 
-#endif
 /*
  * This is performance-critical, we want to do it O(1)
  *
@@ -311,59 +170,29 @@ struct irq_pin_list {
        struct irq_pin_list *next;
 };
 
-static struct irq_pin_list *irq_2_pin_head;
-/* fill one page ? */
-static int nr_irq_2_pin = 0x100;
+static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
 static struct irq_pin_list *irq_2_pin_ptr;
-static void __init irq_2_pin_init_work(void *data)
+
+static void __init irq_2_pin_init(void)
 {
-       struct dyn_array *da = data;
-       struct irq_pin_list *pin;
+       struct irq_pin_list *pin = irq_2_pin_head;
        int i;
 
-       pin = *da->name;
-
-       for (i = 1; i < *da->nr; i++)
+       for (i = 1; i < PIN_MAP_SIZE; i++)
                pin[i-1].next = &pin[i];
 
        irq_2_pin_ptr = &pin[0];
 }
-DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
 
 static struct irq_pin_list *get_one_free_irq_2_pin(void)
 {
-       struct irq_pin_list *pin;
-       int i;
-
-       pin = irq_2_pin_ptr;
-
-       if (pin) {
-               irq_2_pin_ptr = pin->next;
-               pin->next = NULL;
-               return pin;
-       }
-
-       /*
-        *  we run out of pre-allocate ones, allocate more
-        */
-       printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
-
-       if (after_bootmem)
-               pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
-                                GFP_ATOMIC);
-       else
-               pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
-                               nr_irq_2_pin, PAGE_SIZE, 0);
+       struct irq_pin_list *pin = irq_2_pin_ptr;
 
        if (!pin)
                panic("can not get more irq_2_pin\n");
 
-       for (i = 1; i < nr_irq_2_pin; i++)
-               pin[i-1].next = &pin[i];
-
        irq_2_pin_ptr = pin->next;
        pin->next = NULL;
-
        return pin;
 }
 
@@ -402,8 +231,9 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
 {
        struct io_apic __iomem *io_apic = io_apic_base(apic);
-        if (sis_apic_bug)
-                writel(reg, &io_apic->index);
+
+       if (sis_apic_bug)
+               writel(reg, &io_apic->index);
        writel(value, &io_apic->data);
 }
 
@@ -1162,11 +992,11 @@ static int pin_2_irq(int idx, int apic, int pin)
                while (i < apic)
                        irq += nr_ioapic_registers[i++];
                irq += pin;
-                /*
+               /*
                  * For MPS mode, so far only needed by ES7000 platform
                  */
-                if (ioapic_renumber_irq)
-                        irq = ioapic_renumber_irq(apic, irq);
+               if (ioapic_renumber_irq)
+                       irq = ioapic_renumber_irq(apic, irq);
        }
 
 #ifdef CONFIG_X86_32
@@ -1310,6 +1140,20 @@ static void __clear_irq_vector(int irq)
 
        cfg->vector = 0;
        cpus_clear(cfg->domain);
+
+       if (likely(!cfg->move_in_progress))
+               return;
+       cpus_and(mask, cfg->old_domain, cpu_online_map);
+       for_each_cpu_mask_nr(cpu, mask) {
+               for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+                                                               vector++) {
+                       if (per_cpu(vector_irq, cpu)[vector] != irq)
+                               continue;
+                       per_cpu(vector_irq, cpu)[vector] = -1;
+                       break;
+               }
+       }
+       cfg->move_in_progress = 0;
 }
 
 void __setup_vector_irq(int cpu)
@@ -1350,19 +1194,19 @@ static struct irq_chip ir_ioapic_chip;
 #ifdef CONFIG_X86_32
 static inline int IO_APIC_irq_trigger(int irq)
 {
-        int apic, idx, pin;
+       int apic, idx, pin;
 
-        for (apic = 0; apic < nr_ioapics; apic++) {
-                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-                        idx = find_irq_entry(apic, pin, mp_INT);
-                        if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
-                                return irq_trigger(idx);
-                }
-        }
-        /*
+       for (apic = 0; apic < nr_ioapics; apic++) {
+               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+                       idx = find_irq_entry(apic, pin, mp_INT);
+                       if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
+                               return irq_trigger(idx);
+               }
+       }
+       /*
          * nonexistent IRQs are edge default
          */
-        return 0;
+       return 0;
 }
 #else
 static inline int IO_APIC_irq_trigger(int irq)
@@ -1375,11 +1219,7 @@ static void ioapic_register_intr(int irq, unsigned long trigger)
 {
        struct irq_desc *desc;
 
-       /* first time to use this irq_desc */
-       if (irq < 16)
-               desc = irq_to_desc(irq);
-       else
-               desc = irq_to_desc_alloc(irq);
+       desc = irq_to_desc(irq);
 
        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
            trigger == IOAPIC_LEVEL)
@@ -1631,8 +1471,8 @@ __apicdebuginit(void) print_IO_APIC(void)
        reg_01.raw = io_apic_read(apic, 1);
        if (reg_01.bits.version >= 0x10)
                reg_02.raw = io_apic_read(apic, 2);
-        if (reg_01.bits.version >= 0x20)
-                reg_03.raw = io_apic_read(apic, 3);
+       if (reg_01.bits.version >= 0x20)
+               reg_03.raw = io_apic_read(apic, 3);
        spin_unlock_irqrestore(&ioapic_lock, flags);
 
        printk("\n");
@@ -2211,9 +2051,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
 #else
 static int ioapic_retrigger_irq(unsigned int irq)
 {
-        send_IPI_self(irq_cfg(irq)->vector);
+       send_IPI_self(irq_cfg(irq)->vector);
 
-        return 1;
+       return 1;
 }
 #endif
 
@@ -2311,7 +2151,7 @@ static int migrate_irq_remapped_level(int irq)
 
        if (io_apic_level_ack_pending(irq)) {
                /*
-                * Interrupt in progress. Migrating irq now will change the
+                * Interrupt in progress. Migrating irq now will change the
                 * vector information in the IO-APIC RTE and that will confuse
                 * the EOI broadcast performed by cpu.
                 * So, delay the irq migration to the next instance.
@@ -2451,9 +2291,7 @@ static void ack_apic_edge(unsigned int irq)
        ack_APIC_irq();
 }
 
-#ifdef CONFIG_X86_32
 atomic_t irq_mis_count;
-#endif
 
 static void ack_apic_level(unsigned int irq)
 {
@@ -2548,28 +2386,28 @@ static void ack_apic_level(unsigned int irq)
 }
 
 static struct irq_chip ioapic_chip __read_mostly = {
-       .name           = "IO-APIC",
-       .startup        = startup_ioapic_irq,
-       .mask           = mask_IO_APIC_irq,
-       .unmask         = unmask_IO_APIC_irq,
-       .ack            = ack_apic_edge,
-       .eoi            = ack_apic_level,
+       .name           = "IO-APIC",
+       .startup        = startup_ioapic_irq,
+       .mask           = mask_IO_APIC_irq,
+       .unmask         = unmask_IO_APIC_irq,
+       .ack            = ack_apic_edge,
+       .eoi            = ack_apic_level,
 #ifdef CONFIG_SMP
-       .set_affinity   = set_ioapic_affinity_irq,
+       .set_affinity   = set_ioapic_affinity_irq,
 #endif
        .retrigger      = ioapic_retrigger_irq,
 };
 
 #ifdef CONFIG_INTR_REMAP
 static struct irq_chip ir_ioapic_chip __read_mostly = {
-       .name           = "IR-IO-APIC",
-       .startup        = startup_ioapic_irq,
-       .mask           = mask_IO_APIC_irq,
-       .unmask         = unmask_IO_APIC_irq,
-       .ack            = ack_x2apic_edge,
-       .eoi            = ack_x2apic_level,
+       .name           = "IR-IO-APIC",
+       .startup        = startup_ioapic_irq,
+       .mask           = mask_IO_APIC_irq,
+       .unmask         = unmask_IO_APIC_irq,
+       .ack            = ack_x2apic_edge,
+       .eoi            = ack_x2apic_level,
 #ifdef CONFIG_SMP
-       .set_affinity   = set_ir_ioapic_affinity_irq,
+       .set_affinity   = set_ir_ioapic_affinity_irq,
 #endif
        .retrigger      = ioapic_retrigger_irq,
 };
@@ -2758,8 +2596,8 @@ static inline void __init check_timer(void)
 
        local_irq_save(flags);
 
-        ver = apic_read(APIC_LVR);
-        ver = GET_APIC_VERSION(ver);
+       ver = apic_read(APIC_LVR);
+       ver = GET_APIC_VERSION(ver);
 
        /*
         * get/set the timer IRQ vector:
@@ -2944,12 +2782,12 @@ void __init setup_IO_APIC(void)
        io_apic_irqs = ~PIC_IRQS;
 
        apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
-        /*
+       /*
          * Set up IO-APIC IRQ routing.
          */
 #ifdef CONFIG_X86_32
-        if (!acpi_ioapic)
-                setup_ioapic_ids_from_mpc();
+       if (!acpi_ioapic)
+               setup_ioapic_ids_from_mpc();
 #endif
        sync_Arb_IDs();
        setup_IO_APIC_irqs();
@@ -2964,9 +2802,9 @@ void __init setup_IO_APIC(void)
 
 static int __init io_apic_bug_finalize(void)
 {
-        if (sis_apic_bug == -1)
-                sis_apic_bug = 0;
-        return 0;
+       if (sis_apic_bug == -1)
+               sis_apic_bug = 0;
+       return 0;
 }
 
 late_initcall(io_apic_bug_finalize);
@@ -3066,9 +2904,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
        unsigned long flags;
        struct irq_cfg *cfg_new;
 
-#ifndef CONFIG_HAVE_SPARSE_IRQ
        irq_want = nr_irqs - 1;
-#endif
 
        irq = 0;
        spin_lock_irqsave(&vector_lock, flags);
@@ -3323,7 +3159,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
        if (index < 0) {
                printk(KERN_ERR
                       "Unable to allocate %d IRTE for PCI %s\n", nvec,
-                       pci_name(dev));
+                      pci_name(dev));
                return -ENOSPC;
        }
        return index;
@@ -3692,6 +3528,72 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 }
 #endif /* CONFIG_HT_IRQ */
 
+#ifdef CONFIG_X86_64
+/*
+ * Re-target the irq to the specified CPU and enable the specified MMR located
+ * on the specified blade to allow the sending of MSIs to the specified CPU.
+ */
+int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
+                      unsigned long mmr_offset)
+{
+       const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
+       struct irq_cfg *cfg;
+       int mmr_pnode;
+       unsigned long mmr_value;
+       struct uv_IO_APIC_route_entry *entry;
+       unsigned long flags;
+       int err;
+
+       err = assign_irq_vector(irq, *eligible_cpu);
+       if (err != 0)
+               return err;
+
+       spin_lock_irqsave(&vector_lock, flags);
+       set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
+                                     irq_name);
+       spin_unlock_irqrestore(&vector_lock, flags);
+
+       cfg = irq_cfg(irq);
+
+       mmr_value = 0;
+       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+       BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
+
+       entry->vector = cfg->vector;
+       entry->delivery_mode = INT_DELIVERY_MODE;
+       entry->dest_mode = INT_DEST_MODE;
+       entry->polarity = 0;
+       entry->trigger = 0;
+       entry->mask = 0;
+       entry->dest = cpu_mask_to_apicid(*eligible_cpu);
+
+       mmr_pnode = uv_blade_to_pnode(mmr_blade);
+       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+
+       return irq;
+}
+
+/*
+ * Disable the specified MMR located on the specified blade so that MSIs are
+ * no longer allowed to be sent.
+ */
+void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
+{
+       unsigned long mmr_value;
+       struct uv_IO_APIC_route_entry *entry;
+       int mmr_pnode;
+
+       mmr_value = 0;
+       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+       BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
+
+       entry->mask = 1;
+
+       mmr_pnode = uv_blade_to_pnode(mmr_blade);
+       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+}
+#endif /* CONFIG_X86_64 */
+
 int __init io_apic_get_redir_entries (int ioapic)
 {
        union IO_APIC_reg_01    reg_01;
@@ -3706,25 +3608,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 
 int __init probe_nr_irqs(void)
 {
-       int idx;
-       int nr = 0;
-#ifndef CONFIG_XEN
-       int nr_min = 32;
-#else
-       int nr_min = NR_IRQS;
-#endif
-
-       for (idx = 0; idx < nr_ioapics; idx++)
-               nr += io_apic_get_redir_entries(idx) + 1;
-
-       /* double it for hotplug and msi and nmi */
-       nr <<= 1;
-
-       /* something wrong ? */
-       if (nr < nr_min)
-               nr = nr_min;
-
-       return nr;
+       return NR_IRQS;
 }
 
 /* --------------------------------------------------------------------------
@@ -3871,7 +3755,9 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 void __init setup_ioapic_dest(void)
 {
        int pin, ioapic, irq, irq_entry;
+       struct irq_desc *desc;
        struct irq_cfg *cfg;
+       cpumask_t mask;
 
        if (skip_ioapic_setup == 1)
                return;
@@ -3888,16 +3774,30 @@ void __init setup_ioapic_dest(void)
                         * cpu is online.
                         */
                        cfg = irq_cfg(irq);
-                       if (!cfg->vector)
+                       if (!cfg->vector) {
                                setup_IO_APIC_irq(ioapic, pin, irq,
                                                  irq_trigger(irq_entry),
                                                  irq_polarity(irq_entry));
+                               continue;
+
+                       }
+
+                       /*
+                        * Honour affinities which have been set in early boot
+                        */
+                       desc = irq_to_desc(irq);
+                       if (desc->status &
+                           (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
+                               mask = desc->affinity;
+                       else
+                               mask = TARGET_CPUS;
+
 #ifdef CONFIG_INTR_REMAP
-                       else if (intr_remapping_enabled)
-                               set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
-#endif
+                       if (intr_remapping_enabled)
+                               set_ir_ioapic_affinity_irq(irq, mask);
                        else
-                               set_ioapic_affinity_irq(irq, TARGET_CPUS);
+#endif
+                               set_ioapic_affinity_irq(irq, mask);
                }
 
        }
@@ -3943,23 +3843,24 @@ static struct resource * __init ioapic_setup_resources(void)
 void __init ioapic_init_mappings(void)
 {
        unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
-       int i;
        struct resource *ioapic_res;
+       int i;
 
+       irq_2_pin_init();
        ioapic_res = ioapic_setup_resources();
        for (i = 0; i < nr_ioapics; i++) {
                if (smp_found_config) {
                        ioapic_phys = mp_ioapics[i].mp_apicaddr;
 #ifdef CONFIG_X86_32
-                        if (!ioapic_phys) {
-                                printk(KERN_ERR
-                                       "WARNING: bogus zero IO-APIC "
-                                       "address found in MPTABLE, "
-                                       "disabling IO/APIC support!\n");
-                                smp_found_config = 0;
-                                skip_ioapic_setup = 1;
-                                goto fake_ioapic_page;
-                        }
+                       if (!ioapic_phys) {
+                               printk(KERN_ERR
+                                      "WARNING: bogus zero IO-APIC "
+                                      "address found in MPTABLE, "
+                                      "disabling IO/APIC support!\n");
+                               smp_found_config = 0;
+                               skip_ioapic_setup = 1;
+                               goto fake_ioapic_page;
+                       }
 #endif
                } else {
 #ifdef CONFIG_X86_32