Merge branch 'x86/irq' into x86/devel
author     Ingo Molnar <mingo@elte.hu>
           Tue, 8 Jul 2008 07:53:57 +0000 (09:53 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Tue, 8 Jul 2008 07:53:57 +0000 (09:53 +0200)
Conflicts:

arch/x86/kernel/i8259.c
arch/x86/kernel/irqinit_64.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/apic_32.c
arch/x86/kernel/io_apic_32.c
arch/x86/kernel/io_apic_64.c
arch/x86/kernel/irq_32.c
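The hunks below replace several open-coded set_intr_gate() calls for IPI and APIC vectors with alloc_intr_gate(), and the fixed FIRST_SYSTEM_VECTOR limit used by __assign_irq_vector() and setup_IO_APIC() becomes the runtime variable first_system_vector. The user-space sketch that follows only illustrates that idea; it is not the kernel's implementation, and SYS_VECTOR_ALLOCED, intr_handler_t, dummy_timer_interrupt and the 0xef value are assumptions made for the example.

#include <stdio.h>

#define NR_VECTORS          256
#define SYS_VECTOR_FREE       0
#define SYS_VECTOR_ALLOCED    1   /* assumed counterpart of SYS_VECTOR_FREE */

static int  first_system_vector = 0xfe;
static char system_vectors[NR_VECTORS];   /* all SYS_VECTOR_FREE initially */

typedef void (*intr_handler_t)(void);     /* hypothetical handler type */

/* stand-in for programming an IDT entry */
static void set_intr_gate(unsigned int vector, intr_handler_t handler)
{
	printf("vector 0x%02x -> handler %p\n", vector, (void *)handler);
}

/* claim a system vector, track the lowest one, then install the handler */
static void alloc_intr_gate(unsigned int vector, intr_handler_t handler)
{
	system_vectors[vector] = SYS_VECTOR_ALLOCED;
	if ((int)vector < first_system_vector)
		first_system_vector = vector;
	set_intr_gate(vector, handler);
}

static void dummy_timer_interrupt(void) { }

int main(void)
{
	alloc_intr_gate(0xef, dummy_timer_interrupt);  /* e.g. a local timer vector */
	printf("first_system_vector is now 0x%02x\n", first_system_vector);
	return 0;
}

Tracking the lowest claimed vector this way matches the setup_IO_APIC() hunk below, which reserves used_vectors starting at first_system_vector rather than at a compile-time FIRST_SYSTEM_VECTOR.
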

index 45d8da405ad9d7ea448d9d6a858320c0bfca421a,d5767cb19d565c421890273eabe465f7fa51505f..ce4538ebb7fe81420d7d9b749e5248f4137276ed
@@@ -64,12 -64,17 +64,16 @@@ static int enable_local_apic __initdata
  
  /* Local APIC timer verification ok */
  static int local_apic_timer_verify_ok;
 -/* Disable local APIC timer from the kernel commandline or via dmi quirk
 -   or using CPU MSR check */
 -int local_apic_timer_disabled;
 +/* Disable local APIC timer from the kernel commandline or via dmi quirk */
 +static int local_apic_timer_disabled;
  /* Local APIC timer works in C2 */
  int local_apic_timer_c2_ok;
  EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
  
+ int first_system_vector = 0xfe;
+ char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
  /*
   * Debug level, exported for io_apic.c
   */
@@@ -1153,6 -1158,9 +1157,6 @@@ static int __init detect_init_APIC(void
        if (l & MSR_IA32_APICBASE_ENABLE)
                mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
  
 -      if (nmi_watchdog != NMI_NONE && nmi_watchdog != NMI_DISABLED)
 -              nmi_watchdog = NMI_LOCAL_APIC;
 -
        printk(KERN_INFO "Found and enabled local APIC!\n");
  
        apic_pm_activate();
@@@ -1265,10 -1273,6 +1269,10 @@@ int __init APIC_init_uniprocessor(void
  
        setup_local_APIC();
  
 +#ifdef CONFIG_X86_IO_APIC
 +      if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
 +#endif
 +              localise_nmi_watchdog();
        end_local_APIC_setup();
  #ifdef CONFIG_X86_IO_APIC
        if (smp_found_config)
@@@ -1351,13 -1355,13 +1355,13 @@@ void __init smp_intr_init(void
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
-       set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+       alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
  
        /* IPI for invalidation */
-       set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
  
        /* IPI for generic function call */
-       set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+       alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
  }
  #endif
  
@@@ -1370,15 -1374,15 +1374,15 @@@ void __init apic_intr_init(void
        smp_intr_init();
  #endif
        /* self generated IPI for local APIC timer */
-       set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+       alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
  
        /* IPI vectors for APIC spurious and error interrupts */
-       set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-       set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+       alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+       alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
  
        /* thermal monitor LVT interrupt */
  #ifdef CONFIG_X86_MCE_P4THERMAL
-       set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+       alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
  #endif
  }
  
index d4f9df2b022aaf29dddd5d1f2045494961043f84,0774b231a28be66d589c198349ce3b0f97009122..dac47d61d2be9866e28c3ded5f9c4ea2232c277d
@@@ -58,7 -58,7 +58,7 @@@ static struct { int pin, apic; } ioapic
  static DEFINE_SPINLOCK(ioapic_lock);
  static DEFINE_SPINLOCK(vector_lock);
  
 -int timer_over_8254 __initdata = 1;
 +int timer_through_8259 __initdata;
  
  /*
   *    Is the SiS APIC rmw bug present ?
@@@ -239,7 -239,7 +239,7 @@@ static void __init replace_pin_at_irq(u
        }
  }
  
 -static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
 +static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable)
  {
        struct irq_pin_list *entry = irq_2_pin + irq;
        unsigned int pin, reg;
  }
  
  /* mask = 1 */
 -static void __mask_IO_APIC_irq (unsigned int irq)
 +static void __mask_IO_APIC_irq(unsigned int irq)
  {
 -      __modify_IO_APIC_irq(irq, 0x00010000, 0);
 +      __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0);
  }
  
  /* mask = 0 */
 -static void __unmask_IO_APIC_irq (unsigned int irq)
 +static void __unmask_IO_APIC_irq(unsigned int irq)
  {
 -      __modify_IO_APIC_irq(irq, 0, 0x00010000);
 +      __modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED);
  }
  
  /* mask = 1, trigger = 0 */
 -static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
 +static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
  {
 -      __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
 +      __modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED,
 +                              IO_APIC_REDIR_LEVEL_TRIGGER);
  }
  
  /* mask = 0, trigger = 1 */
 -static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
 +static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
  {
 -      __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
 +      __modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER,
 +                              IO_APIC_REDIR_MASKED);
  }
  
 -static void mask_IO_APIC_irq (unsigned int irq)
 +static void mask_IO_APIC_irq(unsigned int irq)
  {
        unsigned long flags;
  
        spin_unlock_irqrestore(&ioapic_lock, flags);
  }
  
 -static void unmask_IO_APIC_irq (unsigned int irq)
 +static void unmask_IO_APIC_irq(unsigned int irq)
  {
        unsigned long flags;
  
  static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
  {
        struct IO_APIC_route_entry entry;
 -      
 +
        /* Check delivery_mode to be sure we're not clearing an SMI pin */
        entry = ioapic_read_entry(apic, pin);
        if (entry.delivery_mode == dest_SMI)
        ioapic_mask_entry(apic, pin);
  }
  
 -static void clear_IO_APIC (void)
 +static void clear_IO_APIC(void)
  {
        int apic, pin;
  
@@@ -334,7 -332,7 +334,7 @@@ static void set_ioapic_affinity_irq(uns
        struct irq_pin_list *entry = irq_2_pin + irq;
        unsigned int apicid_value;
        cpumask_t tmp;
 -      
 +
        cpus_and(tmp, cpumask, cpu_online_map);
        if (cpus_empty(tmp))
                tmp = TARGET_CPUS;
  # include <linux/kernel_stat.h>       /* kstat */
  # include <linux/slab.h>              /* kmalloc() */
  # include <linux/timer.h>
 - 
 +
  #define IRQBALANCE_CHECK_ARCH -999
  #define MAX_BALANCED_IRQ_INTERVAL     (5*HZ)
  #define MIN_BALANCED_IRQ_INTERVAL     (HZ/2)
@@@ -375,14 -373,14 +375,14 @@@ static int physical_balance __read_most
  static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
  
  static struct irq_cpu_info {
 -      unsigned long * last_irq;
 -      unsigned long * irq_delta;
 +      unsigned long *last_irq;
 +      unsigned long *irq_delta;
        unsigned long irq;
  } irq_cpu_data[NR_CPUS];
  
  #define CPU_IRQ(cpu)          (irq_cpu_data[cpu].irq)
 -#define LAST_CPU_IRQ(cpu,irq)   (irq_cpu_data[cpu].last_irq[irq])
 -#define IRQ_DELTA(cpu,irq)    (irq_cpu_data[cpu].irq_delta[irq])
 +#define LAST_CPU_IRQ(cpu, irq)   (irq_cpu_data[cpu].last_irq[irq])
 +#define IRQ_DELTA(cpu, irq)   (irq_cpu_data[cpu].irq_delta[irq])
  
  #define IDLE_ENOUGH(cpu,now) \
        (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
@@@ -421,8 -419,8 +421,8 @@@ inside
                        if (cpu == -1)
                                cpu = NR_CPUS-1;
                }
 -      } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
 -                      (search_idle && !IDLE_ENOUGH(cpu,now)));
 +      } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) ||
 +                      (search_idle && !IDLE_ENOUGH(cpu, now)));
  
        return cpu;
  }
@@@ -432,14 -430,15 +432,14 @@@ static inline void balance_irq(int cpu
        unsigned long now = jiffies;
        cpumask_t allowed_mask;
        unsigned int new_cpu;
 -              
 +
        if (irqbalance_disabled)
 -              return; 
 +              return;
  
        cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
        new_cpu = move(cpu, allowed_mask, now, 1);
 -      if (cpu != new_cpu) {
 +      if (cpu != new_cpu)
                set_pending_irq(irq, cpumask_of_cpu(new_cpu));
 -      }
  }
  
  static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
                        if (!irq_desc[j].action)
                                continue;
                        /* Is it a significant load ?  */
 -                      if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
 +                      if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
                                                useful_load_threshold)
                                continue;
                        balance_irq(i, j);
                }
        }
        balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
 -              balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);       
 +              balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
        return;
  }
  
@@@ -487,22 -486,22 +487,22 @@@ static void do_irq_balance(void
                        /* Is this an active IRQ or balancing disabled ? */
                        if (!irq_desc[j].action || irq_balancing_disabled(j))
                                continue;
 -                      if ( package_index == i )
 -                              IRQ_DELTA(package_index,j) = 0;
 +                      if (package_index == i)
 +                              IRQ_DELTA(package_index, j) = 0;
                        /* Determine the total count per processor per IRQ */
                        value_now = (unsigned long) kstat_cpu(i).irqs[j];
  
                        /* Determine the activity per processor per IRQ */
 -                      delta = value_now - LAST_CPU_IRQ(i,j);
 +                      delta = value_now - LAST_CPU_IRQ(i, j);
  
                        /* Update last_cpu_irq[][] for the next time */
 -                      LAST_CPU_IRQ(i,j) = value_now;
 +                      LAST_CPU_IRQ(i, j) = value_now;
  
                        /* Ignore IRQs whose rate is less than the clock */
                        if (delta < useful_load_threshold)
                                continue;
                        /* update the load for the processor or package total */
 -                      IRQ_DELTA(package_index,j) += delta;
 +                      IRQ_DELTA(package_index, j) += delta;
  
                        /* Keep track of the higher numbered sibling as well */
                        if (i != package_index)
        max_cpu_irq = ULONG_MAX;
  
  tryanothercpu:
 -      /* Look for heaviest loaded processor.
 +      /*
 +       * Look for heaviest loaded processor.
         * We may come back to get the next heaviest loaded processor.
         * Skip processors with trivial loads.
         */
        for_each_online_cpu(i) {
                if (i != CPU_TO_PACKAGEINDEX(i))
                        continue;
 -              if (max_cpu_irq <= CPU_IRQ(i)) 
 +              if (max_cpu_irq <= CPU_IRQ(i))
                        continue;
                if (tmp_cpu_irq < CPU_IRQ(i)) {
                        tmp_cpu_irq = CPU_IRQ(i);
        }
  
        if (tmp_loaded == -1) {
 -       /* In the case of small number of heavy interrupt sources, 
 -        * loading some of the cpus too much. We use Ingo's original 
 +       /*
 +        * In the case of small number of heavy interrupt sources,
 +        * loading some of the cpus too much. We use Ingo's original
          * approach to rotate them around.
          */
                if (!first_attempt && imbalance >= useful_load_threshold) {
                }
                goto not_worth_the_effort;
        }
 -      
 +
        first_attempt = 0;              /* heaviest search */
        max_cpu_irq = tmp_cpu_irq;      /* load */
        max_loaded = tmp_loaded;        /* processor */
        imbalance = (max_cpu_irq - min_cpu_irq) / 2;
 -      
 -      /* if imbalance is less than approx 10% of max load, then
 +
 +      /*
 +       * if imbalance is less than approx 10% of max load, then
         * observe diminishing returns action. - quit
         */
        if (imbalance < (max_cpu_irq >> 3))
@@@ -581,25 -577,26 +581,25 @@@ tryanotherirq
                /* Is this an active IRQ? */
                if (!irq_desc[j].action)
                        continue;
 -              if (imbalance <= IRQ_DELTA(max_loaded,j))
 +              if (imbalance <= IRQ_DELTA(max_loaded, j))
                        continue;
                /* Try to find the IRQ that is closest to the imbalance
                 * without going over.
                 */
 -              if (move_this_load < IRQ_DELTA(max_loaded,j)) {
 -                      move_this_load = IRQ_DELTA(max_loaded,j);
 +              if (move_this_load < IRQ_DELTA(max_loaded, j)) {
 +                      move_this_load = IRQ_DELTA(max_loaded, j);
                        selected_irq = j;
                }
        }
 -      if (selected_irq == -1) {
 +      if (selected_irq == -1)
                goto tryanothercpu;
 -      }
  
        imbalance = move_this_load;
 -      
 +
        /* For physical_balance case, we accumulated both load
         * values in the one of the siblings cpu_irq[],
         * to use the same code for physical and logical processors
 -       * as much as possible. 
 +       * as much as possible.
         *
         * NOTE: the cpu_irq[] array holds the sum of the load for
         * sibling A and sibling B in the slot for the lowest numbered
                /* mark for change destination */
                set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
  
 -              /* Since we made a change, come back sooner to 
 +              /* Since we made a change, come back sooner to
                 * check for more variation.
                 */
                balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
 -                      balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);       
 +                      balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
                return;
        }
        goto tryanotherirq;
@@@ -643,7 -640,7 +643,7 @@@ not_worth_the_effort
         * upward
         */
        balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
 -              balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);       
 +              balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
        return;
  }
  
@@@ -682,13 -679,13 +682,13 @@@ static int __init balanced_irq_init(voi
        cpumask_t tmp;
  
        cpus_shift_right(tmp, cpu_online_map, 2);
 -        c = &boot_cpu_data;
 +      c = &boot_cpu_data;
        /* When not overwritten by the command line ask subarchitecture. */
        if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
                irqbalance_disabled = NO_BALANCE_IRQ;
        if (irqbalance_disabled)
                return 0;
 -      
 +
         /* disable irqbalance completely if there is only one processor online */
        if (num_online_cpus() < 2) {
                irqbalance_disabled = 1;
                physical_balance = 1;
  
        for_each_online_cpu(i) {
 -              irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 -              irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 +              irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 +              irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
                if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
                        printk(KERN_ERR "balanced_irq_init: out of memory");
                        goto failed;
                }
 -              memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
 -              memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
        }
 -      
 +
        printk(KERN_INFO "Starting balanced_irq\n");
        if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd")))
                return 0;
@@@ -844,7 -843,7 +844,7 @@@ static int __init find_isa_irq_apic(in
        }
        if (i < mp_irq_entries) {
                int apic;
 -              for(apic = 0; apic < nr_ioapics; apic++) {
 +              for (apic = 0; apic < nr_ioapics; apic++) {
                        if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
                                return apic;
                }
@@@ -881,7 -880,7 +881,7 @@@ int IO_APIC_get_PCI_irq_vector(int bus
                    !mp_irqs[i].mpc_irqtype &&
                    (bus == lbus) &&
                    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
 -                      int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
 +                      int irq = pin_2_irq(i, apic, mp_irqs[i].mpc_dstirq);
  
                        if (!(apic || IO_APIC_IRQ(irq)))
                                continue;
  EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
  
  /*
 - * This function currently is only a helper for the i386 smp boot process where 
 + * This function currently is only a helper for the i386 smp boot process where
   * we need to reprogram the ioredtbls to cater for the cpus which have come online
   * so mask in all cases should simply be TARGET_CPUS
   */
@@@ -976,36 -975,37 +976,36 @@@ static int MPBIOS_polarity(int idx
        /*
         * Determine IRQ line polarity (high active or low active):
         */
 -      switch (mp_irqs[idx].mpc_irqflag & 3)
 +      switch (mp_irqs[idx].mpc_irqflag & 3) {
 +      case 0: /* conforms, ie. bus-type dependent polarity */
        {
 -              case 0: /* conforms, ie. bus-type dependent polarity */
 -              {
 -                      polarity = test_bit(bus, mp_bus_not_pci)?
 -                              default_ISA_polarity(idx):
 -                              default_PCI_polarity(idx);
 -                      break;
 -              }
 -              case 1: /* high active */
 -              {
 -                      polarity = 0;
 -                      break;
 -              }
 -              case 2: /* reserved */
 -              {
 -                      printk(KERN_WARNING "broken BIOS!!\n");
 -                      polarity = 1;
 -                      break;
 -              }
 -              case 3: /* low active */
 -              {
 -                      polarity = 1;
 -                      break;
 -              }
 -              default: /* invalid */
 -              {
 -                      printk(KERN_WARNING "broken BIOS!!\n");
 -                      polarity = 1;
 -                      break;
 -              }
 +              polarity = test_bit(bus, mp_bus_not_pci)?
 +                      default_ISA_polarity(idx):
 +                      default_PCI_polarity(idx);
 +              break;
 +      }
 +      case 1: /* high active */
 +      {
 +              polarity = 0;
 +              break;
 +      }
 +      case 2: /* reserved */
 +      {
 +              printk(KERN_WARNING "broken BIOS!!\n");
 +              polarity = 1;
 +              break;
 +      }
 +      case 3: /* low active */
 +      {
 +              polarity = 1;
 +              break;
 +      }
 +      default: /* invalid */
 +      {
 +              printk(KERN_WARNING "broken BIOS!!\n");
 +              polarity = 1;
 +              break;
 +      }
        }
        return polarity;
  }
@@@ -1018,67 -1018,69 +1018,67 @@@ static int MPBIOS_trigger(int idx
        /*
         * Determine IRQ trigger mode (edge or level sensitive):
         */
 -      switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
 +      switch ((mp_irqs[idx].mpc_irqflag>>2) & 3) {
 +      case 0: /* conforms, ie. bus-type dependent */
        {
 -              case 0: /* conforms, ie. bus-type dependent */
 -              {
 -                      trigger = test_bit(bus, mp_bus_not_pci)?
 -                                      default_ISA_trigger(idx):
 -                                      default_PCI_trigger(idx);
 +              trigger = test_bit(bus, mp_bus_not_pci)?
 +                              default_ISA_trigger(idx):
 +                              default_PCI_trigger(idx);
  #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
 -                      switch (mp_bus_id_to_type[bus])
 -                      {
 -                              case MP_BUS_ISA: /* ISA pin */
 -                              {
 -                                      /* set before the switch */
 -                                      break;
 -                              }
 -                              case MP_BUS_EISA: /* EISA pin */
 -                              {
 -                                      trigger = default_EISA_trigger(idx);
 -                                      break;
 -                              }
 -                              case MP_BUS_PCI: /* PCI pin */
 -                              {
 -                                      /* set before the switch */
 -                                      break;
 -                              }
 -                              case MP_BUS_MCA: /* MCA pin */
 -                              {
 -                                      trigger = default_MCA_trigger(idx);
 -                                      break;
 -                              }
 -                              default:
 -                              {
 -                                      printk(KERN_WARNING "broken BIOS!!\n");
 -                                      trigger = 1;
 -                                      break;
 -                              }
 -                      }
 -#endif
 +              switch (mp_bus_id_to_type[bus]) {
 +              case MP_BUS_ISA: /* ISA pin */
 +              {
 +                      /* set before the switch */
                        break;
                }
 -              case 1: /* edge */
 +              case MP_BUS_EISA: /* EISA pin */
                {
 -                      trigger = 0;
 +                      trigger = default_EISA_trigger(idx);
                        break;
                }
 -              case 2: /* reserved */
 +              case MP_BUS_PCI: /* PCI pin */
                {
 -                      printk(KERN_WARNING "broken BIOS!!\n");
 -                      trigger = 1;
 +                      /* set before the switch */
                        break;
                }
 -              case 3: /* level */
 +              case MP_BUS_MCA: /* MCA pin */
                {
 -                      trigger = 1;
 +                      trigger = default_MCA_trigger(idx);
                        break;
                }
 -              default: /* invalid */
 +              default:
                {
                        printk(KERN_WARNING "broken BIOS!!\n");
 -                      trigger = 0;
 +                      trigger = 1;
                        break;
                }
        }
 +#endif
 +              break;
 +      }
 +      case 1: /* edge */
 +      {
 +              trigger = 0;
 +              break;
 +      }
 +      case 2: /* reserved */
 +      {
 +              printk(KERN_WARNING "broken BIOS!!\n");
 +              trigger = 1;
 +              break;
 +      }
 +      case 3: /* level */
 +      {
 +              trigger = 1;
 +              break;
 +      }
 +      default: /* invalid */
 +      {
 +              printk(KERN_WARNING "broken BIOS!!\n");
 +              trigger = 0;
 +              break;
 +      }
 +      }
        return trigger;
  }
  
@@@ -1146,8 -1148,8 +1146,8 @@@ static inline int IO_APIC_irq_trigger(i
  
        for (apic = 0; apic < nr_ioapics; apic++) {
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
 -                      idx = find_irq_entry(apic,pin,mp_INT);
 -                      if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
 +                      idx = find_irq_entry(apic, pin, mp_INT);
 +                      if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
                                return irq_trigger(idx);
                }
        }
@@@ -1162,7 -1164,7 +1162,7 @@@ static u8 irq_vector[NR_IRQ_VECTORS] __
  
  static int __assign_irq_vector(int irq)
  {
 -      static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 +      static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
        int vector, offset;
  
        BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
        offset = current_offset;
  next:
        vector += 8;
-       if (vector >= FIRST_SYSTEM_VECTOR) {
+       if (vector >= first_system_vector) {
                offset = (offset + 1) % 8;
                vector = FIRST_DEVICE_VECTOR + offset;
        }
@@@ -1235,15 -1237,15 +1235,15 @@@ static void __init setup_IO_APIC_irqs(v
                /*
                 * add it to the IO-APIC irq-routing table:
                 */
 -              memset(&entry,0,sizeof(entry));
 +              memset(&entry, 0, sizeof(entry));
  
                entry.delivery_mode = INT_DELIVERY_MODE;
                entry.dest_mode = INT_DEST_MODE;
                entry.mask = 0;                         /* enable IRQ */
 -              entry.dest.logical.logical_dest = 
 +              entry.dest.logical.logical_dest =
                                        cpu_mask_to_apicid(TARGET_CPUS);
  
 -              idx = find_irq_entry(apic,pin,mp_INT);
 +              idx = find_irq_entry(apic, pin, mp_INT);
                if (idx == -1) {
                        if (first_notcon) {
                                apic_printk(APIC_VERBOSE, KERN_DEBUG
                        vector = assign_irq_vector(irq);
                        entry.vector = vector;
                        ioapic_register_intr(irq, vector, IOAPIC_AUTO);
 -              
 +
                        if (!apic && (irq < 16))
                                disable_8259A_irq(irq);
                }
  }
  
  /*
 - * Set up the 8259A-master output pin:
 + * Set up the timer pin, possibly with the 8259A-master behind.
   */
 -static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
 +static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
 +                                      int vector)
  {
        struct IO_APIC_route_entry entry;
  
 -      memset(&entry,0,sizeof(entry));
 -
 -      disable_8259A_irq(0);
 -
 -      /* mask LVT0 */
 -      apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 +      memset(&entry, 0, sizeof(entry));
  
        /*
         * We use logical delivery to get the timer IRQ
         * to the first CPU.
         */
        entry.dest_mode = INT_DEST_MODE;
 -      entry.mask = 0;                                 /* unmask IRQ now */
 +      entry.mask = 1;                                 /* mask IRQ now */
        entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
        entry.delivery_mode = INT_DELIVERY_MODE;
        entry.polarity = 0;
  
        /*
         * The timer IRQ doesn't have to know that behind the
 -       * scene we have a 8259A-master in AEOI mode ...
 +       * scene we may have a 8259A-master in AEOI mode ...
         */
 -      irq_desc[0].chip = &ioapic_chip;
 -      set_irq_handler(0, handle_edge_irq);
 +      ioapic_register_intr(0, vector, IOAPIC_EDGE);
  
        /*
         * Add it to the IO-APIC irq-routing table:
         */
        ioapic_write_entry(apic, pin, entry);
 -
 -      enable_8259A_irq(0);
  }
  
  void __init print_IO_APIC(void)
        if (apic_verbosity == APIC_QUIET)
                return;
  
 -      printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
 +      printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
        for (i = 0; i < nr_ioapics; i++)
                printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
                       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
  
  #if 0
  
 -static void print_APIC_bitfield (int base)
 +static void print_APIC_bitfield(int base)
  {
        unsigned int v;
        int i, j;
        }
  }
  
 -void /*__init*/ print_local_APIC(void * dummy)
 +void /*__init*/ print_local_APIC(void *dummy)
  {
        unsigned int v, ver, maxlvt;
  
  
        printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
                smp_processor_id(), hard_smp_processor_id());
 +      v = apic_read(APIC_ID);
        printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v,
                        GET_APIC_ID(read_apic_id()));
        v = apic_read(APIC_LVR);
        printk("\n");
  }
  
 -void print_all_local_APICs (void)
 +void print_all_local_APICs(void)
  {
        on_each_cpu(print_local_APIC, NULL, 1, 1);
  }
@@@ -1578,11 -1586,11 +1578,11 @@@ void /*__init*/ print_PIC(void
        v = inb(0xa0) << 8 | inb(0x20);
        printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
  
 -      outb(0x0b,0xa0);
 -      outb(0x0b,0x20);
 +      outb(0x0b, 0xa0);
 +      outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
 -      outb(0x0a,0xa0);
 -      outb(0x0a,0x20);
 +      outb(0x0a, 0xa0);
 +      outb(0x0a, 0x20);
  
        spin_unlock_irqrestore(&i8259A_lock, flags);
  
@@@ -1618,7 -1626,7 +1618,7 @@@ static void __init enable_IO_APIC(void
                spin_unlock_irqrestore(&ioapic_lock, flags);
                nr_ioapic_registers[apic] = reg_01.bits.entries+1;
        }
 -      for(apic = 0; apic < nr_ioapics; apic++) {
 +      for (apic = 0; apic < nr_ioapics; apic++) {
                int pin;
                /* See if any of the pins is in ExtINT mode */
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
@@@ -1740,7 -1748,7 +1740,7 @@@ static void __init setup_ioapic_ids_fro
                spin_lock_irqsave(&ioapic_lock, flags);
                reg_00.raw = io_apic_read(apic, 0);
                spin_unlock_irqrestore(&ioapic_lock, flags);
 -              
 +
                old_id = mp_ioapics[apic].mpc_apicid;
  
                if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
                /*
                 * Read the right value from the MPC table and
                 * write it into the ID register.
 -               */
 +               */
                apic_printk(APIC_VERBOSE, KERN_INFO
                        "...changing IO-APIC physical APIC ID to %d ...",
                        mp_ioapics[apic].mpc_apicid);
@@@ -2012,7 -2020,7 +2012,7 @@@ static void ack_apic(unsigned int irq
        ack_APIC_irq();
  }
  
 -static void mask_lapic_irq (unsigned int irq)
 +static void mask_lapic_irq(unsigned int irq)
  {
        unsigned long v;
  
        apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
  }
  
 -static void unmask_lapic_irq (unsigned int irq)
 +static void unmask_lapic_irq(unsigned int irq)
  {
        unsigned long v;
  
  }
  
  static struct irq_chip lapic_chip __read_mostly = {
 -      .name           = "local-APIC-edge",
 +      .name           = "local-APIC",
        .mask           = mask_lapic_irq,
        .unmask         = unmask_lapic_irq,
        .eoi            = ack_apic,
  static void __init setup_nmi(void)
  {
        /*
 -       * Dirty trick to enable the NMI watchdog ...
 +       * Dirty trick to enable the NMI watchdog ...
         * We put the 8259A master into AEOI mode and
         * unmask on all local APICs LVT0 as NMI.
         *
         * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
         * is from Maciej W. Rozycki - so we do not have to EOI from
         * the NMI handler or the timer interrupt.
 -       */ 
 +       */
        apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
  
        enable_NMI_through_LVT0();
@@@ -2121,16 -2129,11 +2121,16 @@@ static inline void __init unlock_ExtINT
  static inline void __init check_timer(void)
  {
        int apic1, pin1, apic2, pin2;
 +      int no_pin1 = 0;
        int vector;
 +      unsigned int ver;
        unsigned long flags;
  
        local_irq_save(flags);
  
 +      ver = apic_read(APIC_LVR);
 +      ver = GET_APIC_VERSION(ver);
 +
        /*
         * get/set the timer IRQ vector:
         */
        set_intr_gate(vector, interrupt[0]);
  
        /*
 -       * Subtle, code in do_timer_interrupt() expects an AEOI
 -       * mode for the 8259A whenever interrupts are routed
 -       * through I/O APICs.  Also IRQ0 has to be enabled in
 -       * the 8259A which implies the virtual wire has to be
 -       * disabled in the local APIC.
 +       * As IRQ0 is to be enabled in the 8259A, the virtual
 +       * wire has to be disabled in the local APIC.  Also
 +       * timer interrupts need to be acknowledged manually in
 +       * the 8259A for the i82489DX when using the NMI
 +       * watchdog as that APIC treats NMIs as level-triggered.
 +       * The AEOI mode will finish them in the 8259A
 +       * automatically.
         */
        apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
        init_8259A(1);
 -      timer_ack = 1;
 -      if (timer_over_8254 > 0)
 -              enable_8259A_irq(0);
 +      timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
  
        pin1  = find_isa_irq_pin(0, mp_INT);
        apic1 = find_isa_irq_apic(0, mp_INT);
        printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
                vector, apic1, pin1, apic2, pin2);
  
 +      /*
 +       * Some BIOS writers are clueless and report the ExtINTA
 +       * I/O APIC input from the cascaded 8259A as the timer
 +       * interrupt input.  So just in case, if only one pin
 +       * was found above, try it both directly and through the
 +       * 8259A.
 +       */
 +      if (pin1 == -1) {
 +              pin1 = pin2;
 +              apic1 = apic2;
 +              no_pin1 = 1;
 +      } else if (pin2 == -1) {
 +              pin2 = pin1;
 +              apic2 = apic1;
 +      }
 +
        if (pin1 != -1) {
                /*
                 * Ok, does IRQ0 through the IOAPIC work?
                 */
 +              if (no_pin1) {
 +                      add_pin_to_irq(0, apic1, pin1);
 +                      setup_timer_IRQ0_pin(apic1, pin1, vector);
 +              }
                unmask_IO_APIC_irq(0);
                if (timer_irq_works()) {
                        if (nmi_watchdog == NMI_IO_APIC) {
 -                              disable_8259A_irq(0);
                                setup_nmi();
                                enable_8259A_irq(0);
                        }
                        goto out;
                }
                clear_IO_APIC_pin(apic1, pin1);
 -              printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
 -                              "IO-APIC\n");
 -      }
 +              if (!no_pin1)
 +                      printk(KERN_ERR "..MP-BIOS bug: "
 +                             "8254 timer not connected to IO-APIC\n");
  
 -      printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
 -      if (pin2 != -1) {
 +              printk(KERN_INFO "...trying to set up timer (IRQ0) "
 +                     "through the 8259A ... ");
                printk("\n..... (found pin %d) ...", pin2);
                /*
                 * legacy devices should be connected to IO APIC #0
                 */
 -              setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
 +              replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
 +              setup_timer_IRQ0_pin(apic2, pin2, vector);
 +              unmask_IO_APIC_irq(0);
 +              enable_8259A_irq(0);
                if (timer_irq_works()) {
                        printk("works.\n");
 -                      if (pin1 != -1)
 -                              replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
 -                      else
 -                              add_pin_to_irq(0, apic2, pin2);
 +                      timer_through_8259 = 1;
                        if (nmi_watchdog == NMI_IO_APIC) {
 +                              disable_8259A_irq(0);
                                setup_nmi();
 +                              enable_8259A_irq(0);
                        }
                        goto out;
                }
                /*
                 * Cleanup, just in case ...
                 */
 +              disable_8259A_irq(0);
                clear_IO_APIC_pin(apic2, pin2);
 +              printk(" failed.\n");
        }
 -      printk(" failed.\n");
  
        if (nmi_watchdog == NMI_IO_APIC) {
                printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
 -              nmi_watchdog = 0;
 +              nmi_watchdog = NMI_NONE;
        }
 +      timer_ack = 0;
  
        printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
  
 -      disable_8259A_irq(0);
        set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
                                      "fasteoi");
        apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);   /* Fixed mode */
                printk(" works.\n");
                goto out;
        }
 +      disable_8259A_irq(0);
        apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
        printk(" failed.\n");
  
        printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
  
 -      timer_ack = 0;
        init_8259A(0);
        make_8259A_irq(0);
        apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
@@@ -2280,7 -2261,7 +2280,7 @@@ void __init setup_IO_APIC(void
        int i;
  
        /* Reserve all the system vectors. */
-       for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
+       for (i = first_system_vector; i < NR_VECTORS; i++)
                set_bit(i, used_vectors);
  
        enable_IO_APIC();
                print_IO_APIC();
  }
  
 -static int __init setup_disable_8254_timer(char *s)
 -{
 -      timer_over_8254 = -1;
 -      return 1;
 -}
 -static int __init setup_enable_8254_timer(char *s)
 -{
 -      timer_over_8254 = 2;
 -      return 1;
 -}
 -
 -__setup("disable_8254_timer", setup_disable_8254_timer);
 -__setup("enable_8254_timer", setup_enable_8254_timer);
 -
  /*
   *    Called after all the initialization is done. If we didnt find any
   *    APIC bugs then we can allow the modify fast path
   */
 - 
 +
  static int __init io_apic_bug_finalize(void)
  {
 -      if(sis_apic_bug == -1)
 +      if (sis_apic_bug == -1)
                sis_apic_bug = 0;
        return 0;
  }
@@@ -2323,17 -2318,17 +2323,17 @@@ struct sysfs_ioapic_data 
        struct sys_device dev;
        struct IO_APIC_route_entry entry[0];
  };
 -static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
 +static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
  
  static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
  {
        struct IO_APIC_route_entry *entry;
        struct sysfs_ioapic_data *data;
        int i;
 -      
 +
        data = container_of(dev, struct sysfs_ioapic_data, dev);
        entry = data->entry;
 -      for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
 +      for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
                entry[i] = ioapic_read_entry(dev->id, i);
  
        return 0;
@@@ -2346,7 -2341,7 +2346,7 @@@ static int ioapic_resume(struct sys_dev
        unsigned long flags;
        union IO_APIC_reg_00 reg_00;
        int i;
 -      
 +
        data = container_of(dev, struct sysfs_ioapic_data, dev);
        entry = data->entry;
  
                io_apic_write(dev->id, 0, reg_00.raw);
        }
        spin_unlock_irqrestore(&ioapic_lock, flags);
 -      for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
 +      for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
                ioapic_write_entry(dev->id, i, entry[i]);
  
        return 0;
@@@ -2371,23 -2366,24 +2371,23 @@@ static struct sysdev_class ioapic_sysde
  
  static int __init ioapic_init_sysfs(void)
  {
 -      struct sys_device * dev;
 +      struct sys_device *dev;
        int i, size, error = 0;
  
        error = sysdev_class_register(&ioapic_sysdev_class);
        if (error)
                return error;
  
 -      for (i = 0; i < nr_ioapics; i++ ) {
 -              size = sizeof(struct sys_device) + nr_ioapic_registers[i] 
 +      for (i = 0; i < nr_ioapics; i++) {
 +              size = sizeof(struct sys_device) + nr_ioapic_registers[i]
                        * sizeof(struct IO_APIC_route_entry);
 -              mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
 +              mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
                if (!mp_ioapic_data[i]) {
                        printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
                        continue;
                }
 -              memset(mp_ioapic_data[i], 0, size);
                dev = &mp_ioapic_data[i]->dev;
 -              dev->id = i; 
 +              dev->id = i;
                dev->cls = &ioapic_sysdev_class;
                error = sysdev_register(dev);
                if (error) {
@@@ -2462,7 -2458,7 +2462,7 @@@ static int msi_compose_msg(struct pci_d
                msg->address_lo =
                        MSI_ADDR_BASE_LO |
                        ((INT_DEST_MODE == 0) ?
 -                              MSI_ADDR_DEST_MODE_PHYSICAL:
 +MSI_ADDR_DEST_MODE_PHYSICAL:
                                MSI_ADDR_DEST_MODE_LOGICAL) |
                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
                                MSI_ADDR_REDIRECTION_CPU:
                        MSI_DATA_TRIGGER_EDGE |
                        MSI_DATA_LEVEL_ASSERT |
                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
 -                              MSI_DATA_DELIVERY_FIXED:
 +MSI_DATA_DELIVERY_FIXED:
                                MSI_DATA_DELIVERY_LOWPRI) |
                        MSI_DATA_VECTOR(vector);
        }
@@@ -2644,12 -2640,12 +2644,12 @@@ int arch_setup_ht_irq(unsigned int irq
  #endif /* CONFIG_HT_IRQ */
  
  /* --------------------------------------------------------------------------
 -                          ACPI-based IOAPIC Configuration
 +                      ACPI-based IOAPIC Configuration
     -------------------------------------------------------------------------- */
  
  #ifdef CONFIG_ACPI
  
 -int __init io_apic_get_unique_id (int ioapic, int apic_id)
 +int __init io_apic_get_unique_id(int ioapic, int apic_id)
  {
        union IO_APIC_reg_00 reg_00;
        static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
        int i = 0;
  
        /*
 -       * The P4 platform supports up to 256 APIC IDs on two separate APIC 
 -       * buses (one for LAPICs, one for IOAPICs), where predecessors only 
 +       * The P4 platform supports up to 256 APIC IDs on two separate APIC
 +       * buses (one for LAPICs, one for IOAPICs), where predecessors only
         * supports up to 16 on one shared APIC bus.
 -       * 
 +       *
         * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
         *      advantage of new APIC bus architecture.
         */
        }
  
        /*
 -       * Every APIC in a system must have a unique ID or we get lots of nice 
 +       * Every APIC in a system must have a unique ID or we get lots of nice
         * 'stuck on smp_invalidate_needed IPI wait' messages.
         */
        if (check_apicid_used(apic_id_map, apic_id)) {
                        "trying %d\n", ioapic, apic_id, i);
  
                apic_id = i;
 -      } 
 +      }
  
        tmp = apicid_to_cpu_present(apic_id);
        physids_or(apic_id_map, apic_id_map, tmp);
  }
  
  
 -int __init io_apic_get_version (int ioapic)
 +int __init io_apic_get_version(int ioapic)
  {
        union IO_APIC_reg_01    reg_01;
        unsigned long flags;
  }
  
  
 -int __init io_apic_get_redir_entries (int ioapic)
 +int __init io_apic_get_redir_entries(int ioapic)
  {
        union IO_APIC_reg_01    reg_01;
        unsigned long flags;
  }
  
  
 -int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
 +int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low)
  {
        struct IO_APIC_route_entry entry;
  
         * corresponding device driver registers for this IRQ.
         */
  
 -      memset(&entry,0,sizeof(entry));
 +      memset(&entry, 0, sizeof(entry));
  
        entry.delivery_mode = INT_DELIVERY_MODE;
        entry.dest_mode = INT_DEST_MODE;
index e5ef60303562ea7b671fb99327833280d0ace4ed,f1e1ae3e5c7de1fd81a05c0d2c28e37bb12f44bf..78a3866ab3677bf6b8ab979aa1ea0e64d5977660
@@@ -61,7 -61,7 +61,7 @@@ struct irq_cfg 
  };
  
  /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
 -struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
 +static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
        [0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
        [1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
        [2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
  
  static int assign_irq_vector(int irq, cpumask_t mask);
  
+ int first_system_vector = 0xfe;
+ char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
  #define __apicdebuginit  __init
  
  int sis_apic_bug; /* not actually supported, dummy for compile */
@@@ -90,7 -94,7 +94,7 @@@ static int no_timer_check
  
  static int disable_timer_pin_1 __initdata;
  
 -int timer_over_8254 __initdata = 1;
 +int timer_through_8259 __initdata;
  
  /* Where if anywhere is the i8259 connect in external int mode */
  static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@@ -183,7 -187,7 +187,7 @@@ static bool io_apic_level_ack_pending(u
                        break;
                reg = io_apic_read(entry->apic, 0x10 + pin*2);
                /* Is the remote IRR bit set? */
 -              if ((reg >> 14) & 1) {
 +              if (reg & IO_APIC_REDIR_REMOTE_IRR) {
                        spin_unlock_irqrestore(&ioapic_lock, flags);
                        return true;
                }
@@@ -298,7 -302,7 +302,7 @@@ static void __target_IO_APIC_irq(unsign
                        break;
                io_apic_write(apic, 0x11 + pin*2, dest);
                reg = io_apic_read(apic, 0x10 + pin*2);
 -              reg &= ~0x000000ff;
 +              reg &= ~IO_APIC_REDIR_VECTOR_MASK;
                reg |= vector;
                io_apic_modify(apic, reg);
                if (!entry->next)
@@@ -360,37 -364,16 +364,37 @@@ static void add_pin_to_irq(unsigned in
        entry->pin = pin;
  }
  
 +/*
 + * Reroute an IRQ to a different pin.
 + */
 +static void __init replace_pin_at_irq(unsigned int irq,
 +                                    int oldapic, int oldpin,
 +                                    int newapic, int newpin)
 +{
 +      struct irq_pin_list *entry = irq_2_pin + irq;
 +
 +      while (1) {
 +              if (entry->apic == oldapic && entry->pin == oldpin) {
 +                      entry->apic = newapic;
 +                      entry->pin = newpin;
 +              }
 +              if (!entry->next)
 +                      break;
 +              entry = irq_2_pin + entry->next;
 +      }
 +}
 +
  
  #define DO_ACTION(name,R,ACTION, FINAL)                                       \
                                                                        \
        static void name##_IO_APIC_irq (unsigned int irq)               \
        __DO_ACTION(R, ACTION, FINAL)
  
 -DO_ACTION( __mask,             0, |= 0x00010000, io_apic_sync(entry->apic) )
 -                                              /* mask = 1 */
 -DO_ACTION( __unmask,           0, &= 0xfffeffff, )
 -                                              /* mask = 0 */
 +/* mask = 1 */
 +DO_ACTION(__mask,     0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))
 +
 +/* mask = 0 */
 +DO_ACTION(__unmask,   0, &= ~IO_APIC_REDIR_MASKED, )
  
  static void mask_IO_APIC_irq (unsigned int irq)
  {
@@@ -451,6 -434,20 +455,6 @@@ static int __init disable_timer_pin_set
  }
  __setup("disable_timer_pin_1", disable_timer_pin_setup);
  
 -static int __init setup_disable_8254_timer(char *s)
 -{
 -      timer_over_8254 = -1;
 -      return 1;
 -}
 -static int __init setup_enable_8254_timer(char *s)
 -{
 -      timer_over_8254 = 2;
 -      return 1;
 -}
 -
 -__setup("disable_8254_timer", setup_disable_8254_timer);
 -__setup("enable_8254_timer", setup_enable_8254_timer);
 -
  
  /*
   * Find the IRQ entry number of a certain pin.
@@@ -737,7 -734,7 +741,7 @@@ static int __assign_irq_vector(int irq
                offset = current_offset;
  next:
                vector += 8;
-               if (vector >= FIRST_SYSTEM_VECTOR) {
+               if (vector >= first_system_vector) {
                        /* If we run out of vectors on large boxen, must share them. */
                        offset = (offset + 1) % 8;
                        vector = FIRST_DEVICE_VECTOR + offset;
@@@ -918,21 -915,26 +922,21 @@@ static void __init setup_IO_APIC_irqs(v
  }
  
  /*
 - * Set up the 8259A-master output pin as broadcast to all
 - * CPUs.
 + * Set up the timer pin, possibly with the 8259A-master behind.
   */
 -static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
 +static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
 +                                      int vector)
  {
        struct IO_APIC_route_entry entry;
  
        memset(&entry, 0, sizeof(entry));
  
 -      disable_8259A_irq(0);
 -
 -      /* mask LVT0 */
 -      apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
 -
        /*
         * We use logical delivery to get the timer IRQ
         * to the first CPU.
         */
        entry.dest_mode = INT_DEST_MODE;
 -      entry.mask = 0;                                 /* unmask IRQ now */
 +      entry.mask = 1;                                 /* mask IRQ now */
        entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
        entry.delivery_mode = INT_DELIVERY_MODE;
        entry.polarity = 0;
  
        /*
         * The timer IRQ doesn't have to know that behind the
 -       * scene we have a 8259A-master in AEOI mode ...
 +       * scene we may have a 8259A-master in AEOI mode ...
         */
        set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
  
         * Add it to the IO-APIC irq-routing table:
         */
        ioapic_write_entry(apic, pin, entry);
 -
 -      enable_8259A_irq(0);
  }
  
  void __apicdebuginit print_IO_APIC(void)
@@@ -1077,7 -1081,6 +1081,7 @@@ void __apicdebuginit print_local_APIC(v
  
        printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
                smp_processor_id(), hard_smp_processor_id());
 +      v = apic_read(APIC_ID);
        printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(read_apic_id()));
        v = apic_read(APIC_LVR);
        printk(KERN_INFO "... APIC VERSION: %08x\n", v);
@@@ -1660,7 -1663,6 +1664,7 @@@ static inline void __init check_timer(v
        struct irq_cfg *cfg = irq_cfg + 0;
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
 +      int no_pin1 = 0;
  
        local_irq_save(flags);
  
        assign_irq_vector(0, TARGET_CPUS);
  
        /*
 -       * Subtle, code in do_timer_interrupt() expects an AEOI
 -       * mode for the 8259A whenever interrupts are routed
 -       * through I/O APICs.  Also IRQ0 has to be enabled in
 -       * the 8259A which implies the virtual wire has to be
 -       * disabled in the local APIC.
 +       * As IRQ0 is to be enabled in the 8259A, the virtual
 +       * wire has to be disabled in the local APIC.
         */
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
        init_8259A(1);
 -      if (timer_over_8254 > 0)
 -              enable_8259A_irq(0);
  
        pin1  = find_isa_irq_pin(0, mp_INT);
        apic1 = find_isa_irq_apic(0, mp_INT);
        apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
                cfg->vector, apic1, pin1, apic2, pin2);
  
 +      /*
 +       * Some BIOS writers are clueless and report the ExtINTA
 +       * I/O APIC input from the cascaded 8259A as the timer
 +       * interrupt input.  So just in case, if only one pin
 +       * was found above, try it both directly and through the
 +       * 8259A.
 +       */
 +      if (pin1 == -1) {
 +              pin1 = pin2;
 +              apic1 = apic2;
 +              no_pin1 = 1;
 +      } else if (pin2 == -1) {
 +              pin2 = pin1;
 +              apic2 = apic1;
 +      }
 +
 +      replace_pin_at_irq(0, 0, 0, apic1, pin1);
 +      apic1 = 0;
 +      pin1 = 0;
 +      setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
 +
        if (pin1 != -1) {
                /*
                 * Ok, does IRQ0 through the IOAPIC work?
                 */
 +              if (no_pin1) {
 +                      add_pin_to_irq(0, apic1, pin1);
 +                      setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
 +              }
                unmask_IO_APIC_irq(0);
                if (!no_timer_check && timer_irq_works()) {
                        nmi_watchdog_default();
                        if (nmi_watchdog == NMI_IO_APIC) {
 -                              disable_8259A_irq(0);
                                setup_nmi();
                                enable_8259A_irq(0);
                        }
                        goto out;
                }
                clear_IO_APIC_pin(apic1, pin1);
 -              apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
 -                              "connected to IO-APIC\n");
 -      }
 +              if (!no_pin1)
 +                      apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: "
 +                                  "8254 timer not connected to IO-APIC\n");
  
 -      apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
 -                              "through the 8259A ... ");
 -      if (pin2 != -1) {
 +              apic_printk(APIC_VERBOSE,KERN_INFO
 +                      "...trying to set up timer (IRQ0) "
 +                      "through the 8259A ... ");
                apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
                        apic2, pin2);
                /*
                 * legacy devices should be connected to IO APIC #0
                 */
 -              setup_ExtINT_IRQ0_pin(apic2, pin2, cfg->vector);
 +              replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
 +              setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
 +              unmask_IO_APIC_irq(0);
 +              enable_8259A_irq(0);
                if (timer_irq_works()) {
                        apic_printk(APIC_VERBOSE," works.\n");
 +                      timer_through_8259 = 1;
                        nmi_watchdog_default();
                        if (nmi_watchdog == NMI_IO_APIC) {
 +                              disable_8259A_irq(0);
                                setup_nmi();
 +                              enable_8259A_irq(0);
                        }
                        goto out;
                }
                /*
                 * Cleanup, just in case ...
                 */
 +              disable_8259A_irq(0);
                clear_IO_APIC_pin(apic2, pin2);
 +              apic_printk(APIC_VERBOSE," failed.\n");
        }
 -      apic_printk(APIC_VERBOSE," failed.\n");
  
        if (nmi_watchdog == NMI_IO_APIC) {
                printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
 -              nmi_watchdog = 0;
 +              nmi_watchdog = NMI_NONE;
        }
  
        apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
  
 -      disable_8259A_irq(0);
        irq_desc[0].chip = &lapic_irq_type;
        apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);     /* Fixed mode */
        enable_8259A_irq(0);
                apic_printk(APIC_VERBOSE," works.\n");
                goto out;
        }
 +      disable_8259A_irq(0);
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
        apic_printk(APIC_VERBOSE," failed.\n");
  
diff --combined arch/x86/kernel/irq_32.c
index 468acd04aa2ec12d3907145e005e28dd9776337f,4e3e8ec60276a99467c3319b3736c8a4fe5e26f9..47a6f6f124789a32179172920e4f6a436700ec50
@@@ -48,6 -48,29 +48,29 @@@ void ack_bad_irq(unsigned int irq
  #endif
  }
  
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+ static int check_stack_overflow(void)
+ {
+       long sp;
+       __asm__ __volatile__("andl %%esp,%0" :
+                            "=r" (sp) : "0" (THREAD_SIZE - 1));
+       return sp < (sizeof(struct thread_info) + STACK_WARN);
+ }
+ static void print_stack_overflow(void)
+ {
+       printk(KERN_WARNING "low stack detected by irq handler\n");
+       dump_stack();
+ }
+ #else
+ static inline int check_stack_overflow(void) { return 0; }
+ static inline void print_stack_overflow(void) { }
+ #endif
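check_stack_overflow() leans on two facts: THREAD_SIZE is a power of two and the kernel stack is THREAD_SIZE-aligned, so masking %esp with THREAD_SIZE - 1 yields its offset from the bottom of the stack, i.e. how much room is left above the struct thread_info that sits there. A self-contained user-space illustration of the same arithmetic (constants and values are made up for the demo):

/* Illustration of the masking trick used by check_stack_overflow() above.
 * Assumes an 8 KiB, 8 KiB-aligned stack, a 64-byte thread_info and a 1 KiB
 * warning threshold; all values here are invented for the example. */
#include <stdio.h>

#define THREAD_SIZE_DEMO   8192UL
#define THREAD_INFO_DEMO     64UL
#define STACK_WARN_DEMO    1024UL

int main(void)
{
        unsigned long esp  = 0xc1234000UL + 0x90;          /* pretend %esp */
        unsigned long offs = esp & (THREAD_SIZE_DEMO - 1); /* bytes above stack base */

        printf("%lu bytes left above thread_info: %s\n",
               offs - THREAD_INFO_DEMO,
               offs < THREAD_INFO_DEMO + STACK_WARN_DEMO ? "warn" : "ok");
        return 0;
}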
  #ifdef CONFIG_4KSTACKS
  /*
   * per-CPU IRQ handling contexts (thread information and stack)
@@@ -59,48 -82,29 +82,29 @@@ union irq_ctx 
  
  static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
  static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
- #endif
  
- /*
-  * do_IRQ handles all normal device IRQ's (the special
-  * SMP cross-CPU interrupts have their own specific
-  * handlers).
-  */
- unsigned int do_IRQ(struct pt_regs *regs)
- {     
-       struct pt_regs *old_regs;
-       /* high bit used in ret_from_ code */
-       int irq = ~regs->orig_ax;
-       struct irq_desc *desc = irq_desc + irq;
- #ifdef CONFIG_4KSTACKS
-       union irq_ctx *curctx, *irqctx;
-       u32 *isp;
- #endif
+ static char softirq_stack[NR_CPUS * THREAD_SIZE]
+               __attribute__((__section__(".bss.page_aligned")));
  
-       if (unlikely((unsigned)irq >= NR_IRQS)) {
-               printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-                                       __func__, irq);
-               BUG();
-       }
+ static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+               __attribute__((__section__(".bss.page_aligned")));
  
-       old_regs = set_irq_regs(regs);
-       irq_enter();
- #ifdef CONFIG_DEBUG_STACKOVERFLOW
-       /* Debugging check for stack overflow: is there less than 1KB free? */
-       {
-               long sp;
-               __asm__ __volatile__("andl %%esp,%0" :
-                                       "=r" (sp) : "0" (THREAD_SIZE - 1));
-               if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-                       printk("do_IRQ: stack overflow: %ld\n",
-                               sp - sizeof(struct thread_info));
-                       dump_stack();
-               }
-       }
- #endif
+ static void call_on_stack(void *func, void *stack)
+ {
+       asm volatile("xchgl     %%ebx,%%esp     \n"
+                    "call      *%%edi          \n"
+                    "movl      %%ebx,%%esp     \n"
+                    : "=b" (stack)
+                    : "0" (stack),
+                      "D"(func)
+                    : "memory", "cc", "edx", "ecx", "eax");
+ }
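call_on_stack() is the trampoline both the hardirq and the softirq paths now share: it exchanges %esp with the new stack top held in %ebx, calls the function passed in %edi, then restores the original %esp; the clobber list tells GCC that the called function may trash the usual caller-saved registers. Condensed from the do_softirq() hunk further down, its use for softirqs looks like this (sketch, not a literal copy):

/* Sketch condensed from the do_softirq() hunk below: run __do_softirq()
 * on this CPU's dedicated softirq stack instead of the task stack. */
static void do_softirq_on_own_stack_sketch(void)
{
        struct thread_info *curctx = current_thread_info();
        union irq_ctx *irqctx = softirq_ctx[smp_processor_id()];
        u32 *isp;

        irqctx->tinfo.task = curctx->task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /* the usable stack begins right after the union's thread_info */
        isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
        call_on_stack(__do_softirq, isp);
}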
  
- #ifdef CONFIG_4KSTACKS
+ static inline int
+ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ {
+       union irq_ctx *curctx, *irqctx;
+       u32 *isp, arg1, arg2;
  
        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
-       if (curctx != irqctx) {
-               int arg1, arg2, bx;
-               /* build the stack frame on the IRQ stack */
-               isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-               irqctx->tinfo.task = curctx->tinfo.task;
-               irqctx->tinfo.previous_esp = current_stack_pointer;
+       if (unlikely(curctx == irqctx))
+               return 0;
  
-               /*
-                * Copy the softirq bits in preempt_count so that the
-                * softirq checks work in the hardirq context.
-                */
-               irqctx->tinfo.preempt_count =
-                       (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
-                       (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
-               asm volatile(
-                       "       xchgl  %%ebx,%%esp    \n"
-                       "       call   *%%edi         \n"
-                       "       movl   %%ebx,%%esp    \n"
-                       : "=a" (arg1), "=d" (arg2), "=b" (bx)
-                       :  "0" (irq),   "1" (desc),  "2" (isp),
-                          "D" (desc->handle_irq)
-                       : "memory", "cc", "ecx"
-               );
-       } else
- #endif
-               desc->handle_irq(irq, desc);
+       /* build the stack frame on the IRQ stack */
+       isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
+       irqctx->tinfo.task = curctx->tinfo.task;
+       irqctx->tinfo.previous_esp = current_stack_pointer;
  
-       irq_exit();
-       set_irq_regs(old_regs);
+       /*
+        * Copy the softirq bits in preempt_count so that the
+        * softirq checks work in the hardirq context.
+        */
+       irqctx->tinfo.preempt_count =
+               (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+               (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+       if (unlikely(overflow))
+               call_on_stack(print_stack_overflow, isp);
+       asm volatile("xchgl     %%ebx,%%esp     \n"
+                    "call      *%%edi          \n"
+                    "movl      %%ebx,%%esp     \n"
+                    : "=a" (arg1), "=d" (arg2), "=b" (isp)
+                    :  "0" (irq),   "1" (desc),  "2" (isp),
+                       "D" (desc->handle_irq)
+                    : "memory", "cc", "ecx");
        return 1;
  }
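The preempt_count fixup above is the subtle part: once the handler runs on the IRQ stack, current_thread_info() resolves to irqctx->tinfo, so the interrupted context's softirq bits must be mirrored there or in_softirq()-style checks would report the wrong state. The same masking, pulled out as a hypothetical helper purely for readability:

/* Hypothetical helper (not in the patch): copy only the SOFTIRQ bits of the
 * interrupted context's preempt_count into the IRQ stack's thread_info. */
static inline void mirror_softirq_bits(struct thread_info *irq_ti,
                                       const struct thread_info *cur_ti)
{
        irq_ti->preempt_count = (irq_ti->preempt_count & ~SOFTIRQ_MASK) |
                                (cur_ti->preempt_count &  SOFTIRQ_MASK);
}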
  
- #ifdef CONFIG_4KSTACKS
- static char softirq_stack[NR_CPUS * THREAD_SIZE]
-               __attribute__((__section__(".bss.page_aligned")));
- static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-               __attribute__((__section__(".bss.page_aligned")));
  /*
   * allocate per-cpu stacks for hardirq and for softirq processing
   */
- void irq_ctx_init(int cpu)
+ void __cpuinit irq_ctx_init(int cpu)
  {
        union irq_ctx *irqctx;
  
                return;
  
        irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
-       irqctx->tinfo.task              = NULL;
-       irqctx->tinfo.exec_domain       = NULL;
-       irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
-       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
  
        hardirq_ctx[cpu] = irqctx;
  
        irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
-       irqctx->tinfo.task              = NULL;
-       irqctx->tinfo.exec_domain       = NULL;
-       irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.preempt_count     = 0;
-       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
+       irqctx->tinfo.task              = NULL;
+       irqctx->tinfo.exec_domain       = NULL;
+       irqctx->tinfo.cpu               = cpu;
+       irqctx->tinfo.preempt_count     = 0;
+       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
  
        softirq_ctx[cpu] = irqctx;
  
-       printk("CPU %u irqstacks, hard=%p soft=%p\n",
-               cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
+       printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
+              cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
  }
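hardirq_stack[] and softirq_stack[] reserve NR_CPUS contiguous THREAD_SIZE slabs in .bss.page_aligned; irq_ctx_init() picks out this CPU's slab and seeds the thread_info at its bottom, while the usable stack grows down from the top of the same slab. The indexing, spelled out as a small helper (the helper name is illustrative only):

/* Illustrative helper mirroring the indexing above: CPU n's hardirq context
 * is simply the n-th THREAD_SIZE slab of the static array. */
static union irq_ctx *hardirq_slab_of(int cpu)
{
        return (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
}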
  
  void irq_ctx_exit(int cpu)
@@@ -211,24 -202,55 +202,55 @@@ asmlinkage void do_softirq(void
                /* build the stack frame on the softirq stack */
                isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
  
-               asm volatile(
-                       "       xchgl   %%ebx,%%esp     \n"
-                       "       call    __do_softirq    \n"
-                       "       movl    %%ebx,%%esp     \n"
-                       : "=b"(isp)
-                       : "0"(isp)
-                       : "memory", "cc", "edx", "ecx", "eax"
-               );
+               call_on_stack(__do_softirq, isp);
                /*
                 * Shouldnt happen, we returned above if in_interrupt():
-                */
+                */
                WARN_ON_ONCE(softirq_count());
        }
  
        local_irq_restore(flags);
  }
+ #else
+ static inline int
+ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
  #endif
  
+ /*
+  * do_IRQ handles all normal device IRQ's (the special
+  * SMP cross-CPU interrupts have their own specific
+  * handlers).
+  */
+ unsigned int do_IRQ(struct pt_regs *regs)
+ {
+       struct pt_regs *old_regs;
+       /* high bit used in ret_from_ code */
+       int overflow, irq = ~regs->orig_ax;
+       struct irq_desc *desc = irq_desc + irq;
+       if (unlikely((unsigned)irq >= NR_IRQS)) {
+               printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
+                                       __func__, irq);
+               BUG();
+       }
+       old_regs = set_irq_regs(regs);
+       irq_enter();
+       overflow = check_stack_overflow();
+       if (!execute_on_irq_stack(overflow, desc, irq)) {
+               if (unlikely(overflow))
+                       print_stack_overflow();
+               desc->handle_irq(irq, desc);
+       }
+       irq_exit();
+       set_irq_regs(old_regs);
+       return 1;
+ }
  /*
   * Interrupt statistics:
   */
@@@ -313,20 -335,16 +335,20 @@@ skip
                                per_cpu(irq_stat,j).irq_tlb_count);
                seq_printf(p, "  TLB shootdowns\n");
  #endif
 +#ifdef CONFIG_X86_MCE
                seq_printf(p, "TRM: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).irq_thermal_count);
                seq_printf(p, "  Thermal event interrupts\n");
 +#endif
 +#ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "SPU: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).irq_spurious_count);
                seq_printf(p, "  Spurious interrupts\n");
 +#endif
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
  #if defined(CONFIG_X86_IO_APIC)
                seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
        return 0;
  }
  
 +/*
 + * /proc/stat helpers
 + */
 +u64 arch_irq_stat_cpu(unsigned int cpu)
 +{
 +      u64 sum = nmi_count(cpu);
 +
 +#ifdef CONFIG_X86_LOCAL_APIC
 +      sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
 +#endif
 +#ifdef CONFIG_SMP
 +      sum += per_cpu(irq_stat, cpu).irq_resched_count;
 +      sum += per_cpu(irq_stat, cpu).irq_call_count;
 +      sum += per_cpu(irq_stat, cpu).irq_tlb_count;
 +#endif
 +#ifdef CONFIG_X86_MCE
 +      sum += per_cpu(irq_stat, cpu).irq_thermal_count;
 +#endif
 +#ifdef CONFIG_X86_LOCAL_APIC
 +      sum += per_cpu(irq_stat, cpu).irq_spurious_count;
 +#endif
 +      return sum;
 +}
 +
 +u64 arch_irq_stat(void)
 +{
 +      u64 sum = atomic_read(&irq_err_count);
 +
 +#ifdef CONFIG_X86_IO_APIC
 +      sum += atomic_read(&irq_mis_count);
 +#endif
 +      return sum;
 +}
 +
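arch_irq_stat_cpu() and arch_irq_stat() exist so that /proc/stat's "intr" total can include events the generic per-IRQ counters never see: NMIs, local APIC timer ticks, reschedule/call/TLB IPIs, thermal and spurious interrupts, and IO-APIC mismatch errors. A simplified sketch of how a consumer folds them in (assumption: this mirrors, but is not, the real /proc/stat code):

/* Sketch (assumption): simplified version of the /proc/stat "intr" sum,
 * combining the arch-only counters above with the generic per-IRQ counts. */
static u64 total_interrupts_sketch(void)
{
        u64 sum = arch_irq_stat();              /* irq_err_count, irq_mis_count */
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                sum += arch_irq_stat_cpu(cpu);  /* NMIs, APIC timer, IPIs, ... */

        /* the real code additionally adds kstat_irqs(irq) for every IRQ */
        return sum;
}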
  #ifdef CONFIG_HOTPLUG_CPU
  #include <mach_apic.h>