Merge branch 'cpus4096' into irq/threaded
author    Thomas Gleixner <tglx@linutronix.de>
          Mon, 23 Mar 2009 13:50:03 +0000 (14:50 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
          Mon, 23 Mar 2009 20:20:20 +0000 (21:20 +0100)
Conflicts:
arch/parisc/kernel/irq.c
kernel/irq/handle.c

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
22 files changed:
Makefile
arch/alpha/kernel/irq.c
arch/arm/kernel/irq.c
arch/blackfin/kernel/irqchip.c
arch/ia64/kernel/irq.c
arch/parisc/kernel/irq.c
arch/powerpc/kernel/irq.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/time_64.c
arch/x86/kernel/tsc.c
drivers/acpi/osl.c
drivers/pci/intr_remapping.c
include/linux/interrupt.h
include/linux/irq.h
include/linux/irqnr.h
kernel/irq/chip.c
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/numa_migrate.c
kernel/module.c
mm/vmscan.c

diff --combined Makefile
index a2c2efe9e82edd7b9df112ce3aad7e76b8cb9501,46c04c546ee2d4676693289f54270391fb15ad97..828028d4a448094a36ee98f7e422177eed49a7f0
+++ b/Makefile
@@@ -533,8 -533,9 +533,9 @@@ KBUILD_CFLAGS += $(call cc-option,-Wfra
  endif
  
  # Force gcc to behave correctly even for buggy distributions
- # Arch Makefiles may override this setting
+ ifndef CONFIG_CC_STACKPROTECTOR
  KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+ endif
  
  ifdef CONFIG_FRAME_POINTER
  KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
@@@ -566,9 -567,6 +567,9 @@@ KBUILD_CFLAGS += $(call cc-option,-Wdec
  # disable pointer signed / unsigned warnings in gcc 4.0
  KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
  
 +# disable invalid "can't wrap" optimizations for signed / pointers
 +KBUILD_CFLAGS += $(call cc-option,-fwrapv)
 +
  # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
  # But warn user when we do so
  warn-assign = \
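
The -fwrapv addition above gives signed overflow defined two's-complement semantics, which keeps gcc from deleting overflow checks that rely on wrapping. A minimal C illustration of the class of code this protects (hypothetical example, not from this tree):

/* Without -fwrapv, gcc may assume signed overflow cannot happen,
 * treat "a + 1 < a" as always false, and drop the check entirely. */
int next_index(int a)
{
	if (a + 1 < a)		/* wrapped past INT_MAX */
		return 0;
	return a + 1;
}
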
diff --combined arch/alpha/kernel/irq.c
index d3812eb84015bcf5f9d573d1db382fb2d04e3fbf,7bc7489223f3e03b77aec838838202e8aed3f3b4..cc783466142754b97475522de5fd39aaa5b85f37
@@@ -55,7 -55,7 +55,7 @@@ int irq_select_affinity(unsigned int ir
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;
  
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
        irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
        return 0;
  }
@@@ -90,7 -90,7 +90,7 @@@ show_interrupts(struct seq_file *p, voi
                seq_printf(p, "%10u ", kstat_irqs(irq));
  #else
                for_each_online_cpu(j)
 -                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq]);
 +                      seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
  #endif
                seq_printf(p, " %14s", irq_desc[irq].chip->typename);
                seq_printf(p, "  %c%s",
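
The kstat_cpu(j).irqs[irq] -> kstat_irqs_cpu(irq, j) substitution seen here repeats across every architecture in this merge: per-IRQ counters now live in each irq_desc instead of the static kstat array. The accessor, as added in kernel/irq/handle.c further down:

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? desc->kstat_irqs[cpu] : 0;
}
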
diff --combined arch/arm/kernel/irq.c
index 7296f041628663f2c88258b90f2e411d99d3a4aa,45eacb5a2ecd80fb7a30dc56d6f3386f1c842f19..6874c7dca75aeb287319019ebff639a61f473a5a
@@@ -76,7 -76,7 +76,7 @@@ int show_interrupts(struct seq_file *p
  
                seq_printf(p, "%3d: ", i);
                for_each_present_cpu(cpu)
 -                      seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
 +                      seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
                seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
                seq_printf(p, "  %s", action->name);
                for (action = action->next; action; action = action->next)
@@@ -104,6 -104,11 +104,11 @@@ static struct irq_desc bad_irq_desc = 
        .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
  };
  
+ #ifdef CONFIG_CPUMASK_OFFSTACK
+ /* We are not allocating bad_irq_desc.affinity or .pending_mask */
+ #error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+ #endif
  /*
   * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
   * come via this function.  Instead, they should provide their
@@@ -161,7 -166,7 +166,7 @@@ void __init init_IRQ(void
                irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
  
  #ifdef CONFIG_SMP
-       bad_irq_desc.affinity = CPU_MASK_ALL;
+       cpumask_setall(bad_irq_desc.affinity);
        bad_irq_desc.cpu = smp_processor_id();
  #endif
        init_arch_irq();
@@@ -191,15 -196,16 +196,16 @@@ void migrate_irqs(void
                struct irq_desc *desc = irq_desc + i;
  
                if (desc->cpu == cpu) {
-                       unsigned int newcpu = any_online_cpu(desc->affinity);
-                       if (newcpu == NR_CPUS) {
+                       unsigned int newcpu = cpumask_any_and(desc->affinity,
+                                                             cpu_online_mask);
+                       if (newcpu >= nr_cpu_ids) {
                                if (printk_ratelimit())
                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                                               i, cpu);
  
-                               cpus_setall(desc->affinity);
-                               newcpu = any_online_cpu(desc->affinity);
+                               cpumask_setall(desc->affinity);
+                               newcpu = cpumask_any_and(desc->affinity,
+                                                        cpu_online_mask);
                        }
  
                        route_irq(desc, i, newcpu);
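
The migrate_irqs() hunk above shows the cpus4096 conversion in full: any_online_cpu() with its NR_CPUS sentinel becomes cpumask_any_and() against cpu_online_mask with nr_cpu_ids as the "nothing found" value, and direct mask assignment becomes cpumask_setall()/cpumask_copy() so the code still works when affinity is an off-stack cpumask_var_t. A condensed sketch of the new idiom:

/* Route an IRQ away from a dying CPU (condensed from the hunk above). */
unsigned int newcpu = cpumask_any_and(desc->affinity, cpu_online_mask);

if (newcpu >= nr_cpu_ids) {	/* no online CPU left in the mask */
	cpumask_setall(desc->affinity);
	newcpu = cpumask_any_and(desc->affinity, cpu_online_mask);
}
route_irq(desc, irq, newcpu);
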
diff --combined arch/blackfin/kernel/irqchip.c
index bd052a67032e467bfef3a4aa30eb6dd1b4bc02e7,1ab5b532ec724c97e02dc1f71282aa9918d9a83f..401bd32aa499f10be2009a4a622b0e1a530994b4
@@@ -70,6 -70,11 +70,11 @@@ static struct irq_desc bad_irq_desc = 
  #endif
  };
  
+ #ifdef CONFIG_CPUMASK_OFFSTACK
+ /* We are not allocating a variable-sized bad_irq_desc.affinity */
+ #error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+ #endif
  int show_interrupts(struct seq_file *p, void *v)
  {
        int i = *(loff_t *) v, j;
@@@ -83,7 -88,7 +88,7 @@@
                        goto skip;
                seq_printf(p, "%3d: ", i);
                for_each_online_cpu(j)
 -                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 +                      seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
                seq_printf(p, " %8s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);
                for (action = action->next; action; action = action->next)
diff --combined arch/ia64/kernel/irq.c
index 4f596613bffd86ad842d781b1143928782465a09,226233a6fa19a2d3d8eb9f9ab5684dfda50b2c9d..7429752ef5ade56035e39b3bdd295bbd573497e3
@@@ -80,7 -80,7 +80,7 @@@ int show_interrupts(struct seq_file *p
                seq_printf(p, "%10u ", kstat_irqs(i));
  #else
                for_each_online_cpu(j) {
 -                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 +                      seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
                }
  #endif
                seq_printf(p, " %14s", irq_desc[i].chip->name);
@@@ -103,7 -103,7 +103,7 @@@ static char irq_redir [NR_IRQS]; // = 
  void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
  {
        if (irq < NR_IRQS) {
-               cpumask_copy(&irq_desc[irq].affinity,
+               cpumask_copy(irq_desc[irq].affinity,
                             cpumask_of(cpu_logical_id(hwid)));
                irq_redir[irq] = (char) (redir & 0xff);
        }
@@@ -148,7 -148,7 +148,7 @@@ static void migrate_irqs(void
                if (desc->status == IRQ_PER_CPU)
                        continue;
  
-               if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+               if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
                    >= nr_cpu_ids) {
                        /*
                         * Save it for phase 2 processing
diff --combined arch/parisc/kernel/irq.c
index adfd617b4c18c66dfdfca0202b6a960157f4bf73,49482806863fa522d71e6e37d90a5c470e1e4023..1c740f5cbd6347f0046dc1560fb52dc806fd3103
@@@ -112,7 -112,7 +112,7 @@@ void cpu_end_irq(unsigned int irq
  }
  
  #ifdef CONFIG_SMP
 -int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 +int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
  {
        int cpu_dest;
  
        if (CHECK_IRQ_PER_CPU(irq)) {
                /* Bad linux design decision.  The mask has already
                 * been set; we must reset it */
 -              cpumask_setall(irq_desc[irq].affinity);
 +              cpumask_setall(&irq_desc[irq].affinity);
                return -EINVAL;
        }
  
        /* whatever mask they set, we just allow one CPU */
        cpu_dest = first_cpu(*dest);
 -      *dest = cpumask_of_cpu(cpu_dest);
  
 -      return 0;
 +      return cpu_dest;
  }
  
  static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
  {
 -      if (cpu_check_affinity(irq, dest))
 +      int cpu_dest;
 +
 +      cpu_dest = cpu_check_affinity(irq, dest);
 +      if (cpu_dest < 0)
                return;
  
-       cpumask_copy(&irq_desc[irq].affinity, &cpumask_of_cpu(cpu_dest));
 -      cpumask_copy(irq_desc[irq].affinity, dest);
++      cpumask_copy(&irq_desc[irq].affinity, dest);
  }
  #endif
  
@@@ -185,7 -183,7 +185,7 @@@ int show_interrupts(struct seq_file *p
                seq_printf(p, "%3d: ", i);
  #ifdef CONFIG_SMP
                for_each_online_cpu(j)
 -                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 +                      seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
  #else
                seq_printf(p, "%10u ", kstat_irqs(i));
  #endif
@@@ -297,7 -295,7 +297,7 @@@ int txn_alloc_irq(unsigned int bits_wid
  unsigned long txn_affinity_addr(unsigned int irq, int cpu)
  {
  #ifdef CONFIG_SMP
 -      cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 +      cpumask_copy(&irq_desc[irq].affinity, cpumask_of(cpu));
  #endif
  
        return per_cpu(cpu_data, cpu).txn_addr;
@@@ -354,7 -352,7 +354,7 @@@ void do_cpu_irq_mask(struct pt_regs *re
        irq = eirr_to_irq(eirr_val);
  
  #ifdef CONFIG_SMP
 -      cpumask_copy(&dest, irq_desc[irq].affinity);
 +      cpumask_copy(&dest, &irq_desc[irq].affinity);
        if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
            !cpu_isset(smp_processor_id(), dest)) {
                int cpu = first_cpu(dest);
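
arch/parisc/kernel/irq.c is one of the two files listed under Conflicts above: irq/threaded changed cpu_check_affinity() to return the chosen CPU (or a negative error) instead of rewriting the caller's mask, while cpus4096 reworked how the mask is stored. The resolved calling convention, condensed from the hunk:

/* cpu_check_affinity() now reports the single CPU parisc will route
 * the interrupt to, or -EINVAL for per-CPU interrupts. */
int cpu_dest = cpu_check_affinity(irq, dest);

if (cpu_dest < 0)
	return;
cpumask_copy(&irq_desc[irq].affinity, dest);
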
diff --combined arch/powerpc/kernel/irq.c
index 17efb7118db1140296bf8fa447b1b1133867c398,ad1e5ac721d86f557bac20079b407c8929459273..1b55ffdf002652d09376ce7e1a22d32c1101610f
@@@ -190,7 -190,7 +190,7 @@@ int show_interrupts(struct seq_file *p
                seq_printf(p, "%3d: ", i);
  #ifdef CONFIG_SMP
                for_each_online_cpu(j)
 -                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 +                      seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
  #else
                seq_printf(p, "%10u ", kstat_irqs(i));
  #endif /* CONFIG_SMP */
@@@ -231,7 -231,7 +231,7 @@@ void fixup_irqs(cpumask_t map
                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;
  
-               cpus_and(mask, irq_desc[irq].affinity, map);
+               cpumask_and(&mask, irq_desc[irq].affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
diff --combined arch/sparc/kernel/irq_64.c
index 8ba064f08a6fbdd31bed882e1137ff924bd6a350,233bd87a963744f62641c5e50e1ce2bdf49b2b14..d0d6a515499ac952f487a49e389b2bdd5519bd02
@@@ -185,7 -185,7 +185,7 @@@ int show_interrupts(struct seq_file *p
                seq_printf(p, "%10u ", kstat_irqs(i));
  #else
                for_each_online_cpu(j)
 -                      seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 +                      seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
  #endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, "  %s", action->name);
@@@ -252,9 -252,10 +252,10 @@@ struct irq_handler_data 
  #ifdef CONFIG_SMP
  static int irq_choose_cpu(unsigned int virt_irq)
  {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
  
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
@@@ -805,7 -806,7 +806,7 @@@ void fixup_irqs(void
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
-                                       &irq_desc[irq].affinity);
+                                       irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
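
The irq_choose_cpu() change is forced by the new representation: with affinity now a cpumask_var_t (a pointer when CONFIG_CPUMASK_OFFSTACK=y), the old struct assignment "cpumask_t mask = irq_desc[virt_irq].affinity" no longer compiles, so the value is copied through the accessor into a local:

cpumask_t mask;

cpumask_copy(&mask, irq_desc[virt_irq].affinity);
if (cpus_equal(mask, CPU_MASK_ALL)) {
	/* no explicit affinity set: round-robin across online CPUs */
}
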
diff --combined arch/sparc/kernel/time_64.c
index 642562d83ec44603dddfbc6276fcb7a2e52a211d,db310aa00183cb0b92243cc234f9fcaedc03deeb..f95066b6f8053e8a93e8cfafed0b102408793593
  #include <linux/clocksource.h>
  #include <linux/of_device.h>
  #include <linux/platform_device.h>
 +#include <linux/irq.h>
  
  #include <asm/oplib.h>
  #include <asm/timer.h>
 -#include <asm/irq.h>
  #include <asm/io.h>
  #include <asm/prom.h>
  #include <asm/starfire.h>
@@@ -729,7 -729,7 +729,7 @@@ void timer_interrupt(int irq, struct pt
  
        irq_enter();
  
-       kstat_this_cpu.irqs[0]++;
+       kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
  
        if (unlikely(!evt->event_handler)) {
                printk(KERN_WARNING
diff --combined arch/x86/kernel/tsc.c
index d5cebb52d45ba3e30844b8171e275eb76647493c,83d53ce5d4c4a98aa703032f4f7390292a35536f..462b9ba67e92695ca3049ffde926158f8310322d
@@@ -273,43 -273,30 +273,43 @@@ static unsigned long pit_calibrate_tsc(
   * use the TSC value at the transitions to calculate a pretty
   * good value for the TSC frequency.
   */
 -static inline int pit_expect_msb(unsigned char val)
 +static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
  {
 -      int count = 0;
 +      int count;
 +      u64 tsc = 0;
  
        for (count = 0; count < 50000; count++) {
                /* Ignore LSB */
                inb(0x42);
                if (inb(0x42) != val)
                        break;
 +              tsc = get_cycles();
        }
 -      return count > 50;
 +      *deltap = get_cycles() - tsc;
 +      *tscp = tsc;
 +
 +      /*
 +       * We require _some_ success, but the quality control
 +       * will be based on the error terms on the TSC values.
 +       */
 +      return count > 5;
  }
  
  /*
 - * How many MSB values do we want to see? We aim for a
 - * 15ms calibration, which assuming a 2us counter read
 - * error should give us roughly 150 ppm precision for
 - * the calibration.
 + * How many MSB values do we want to see? We aim for
 + * a maximum error rate of 500ppm (in practice the
 + * real error is much smaller), but refuse to spend
 + * more than 25ms on it.
   */
 -#define QUICK_PIT_MS 15
 -#define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
 +#define MAX_QUICK_PIT_MS 25
 +#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
  
  static unsigned long quick_pit_calibrate(void)
  {
 +      int i;
 +      u64 tsc, delta;
 +      unsigned long d1, d2;
 +
        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);
  
        outb(0xff, 0x42);
        outb(0xff, 0x42);
  
 -      if (pit_expect_msb(0xff)) {
 -              int i;
 -              u64 t1, t2, delta;
 -              unsigned char expect = 0xfe;
 -
 -              t1 = get_cycles();
 -              for (i = 0; i < QUICK_PIT_ITERATIONS; i++, expect--) {
 -                      if (!pit_expect_msb(expect))
 -                              goto failed;
 +      /*
 +       * The PIT starts counting at the next edge, so we
 +       * need to delay for a microsecond. The easiest way
 +       * to do that is to just read back the 16-bit counter
 +       * once from the PIT.
 +       */
 +      inb(0x42);
 +      inb(0x42);
 +
 +      if (pit_expect_msb(0xff, &tsc, &d1)) {
 +              for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
 +                      if (!pit_expect_msb(0xff-i, &delta, &d2))
 +                              break;
 +
 +                      /*
 +                       * Iterate until the error is less than 500 ppm
 +                       */
 +                      delta -= tsc;
 +                      if (d1+d2 < delta >> 11)
 +                              goto success;
                }
 -              t2 = get_cycles();
 -
 -              /*
 -               * Make sure we can rely on the second TSC timestamp:
 -               */
 -              if (!pit_expect_msb(expect))
 -                      goto failed;
 -
 -              /*
 -               * Ok, if we get here, then we've seen the
 -               * MSB of the PIT decrement QUICK_PIT_ITERATIONS
 -               * times, and each MSB had many hits, so we never
 -               * had any sudden jumps.
 -               *
 -               * As a result, we can depend on there not being
 -               * any odd delays anywhere, and the TSC reads are
 -               * reliable.
 -               *
 -               * kHz = ticks / time-in-seconds / 1000;
 -               * kHz = (t2 - t1) / (QPI * 256 / PIT_TICK_RATE) / 1000
 -               * kHz = ((t2 - t1) * PIT_TICK_RATE) / (QPI * 256 * 1000)
 -               */
 -              delta = (t2 - t1)*PIT_TICK_RATE;
 -              do_div(delta, QUICK_PIT_ITERATIONS*256*1000);
 -              printk("Fast TSC calibration using PIT\n");
 -              return delta;
        }
 -failed:
 +      printk("Fast TSC calibration failed\n");
        return 0;
 +
 +success:
 +      /*
 +       * Ok, if we get here, then we've seen the
 +       * MSB of the PIT decrement 'i' times, and the
 +       * error has shrunk to less than 500 ppm.
 +       *
 +       * As a result, we can depend on there not being
 +       * any odd delays anywhere, and the TSC reads are
 +       * reliable (within the error). We also adjust the
 +       * delta to the middle of the error bars, just
 +       * because it looks nicer.
 +       *
 +       * kHz = ticks / time-in-seconds / 1000;
 +       * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
 +       * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
 +       */
 +      delta += (long)(d2 - d1)/2;
 +      delta *= PIT_TICK_RATE;
 +      do_div(delta, i*256*1000);
 +      printk("Fast TSC calibration using PIT\n");
 +      return delta;
  }
  
  /**
@@@ -793,7 -773,7 +793,7 @@@ __cpuinit int unsynchronized_tsc(void
        if (!cpu_has_tsc || tsc_unstable)
                return 1;
  
- #ifdef CONFIG_X86_SMP
+ #ifdef CONFIG_SMP
        if (apic_is_clustered_box())
                return 1;
  #endif
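
The 500 ppm bound in the rewritten calibration comes from the shift in "d1+d2 < delta >> 11": the combined uncertainty of the two TSC reads must stay below delta/2048, roughly 0.05% of the elapsed cycles. The closing arithmetic then converts observed PIT wraps into a TSC frequency; an annotated restatement of the hunk above:

/* i MSB wraps were seen, each spanning 256 PIT ticks at PIT_TICK_RATE
 * (1193182 Hz), so elapsed time is i * 256 / PIT_TICK_RATE seconds:
 *
 *   kHz = delta / (i * 256 / PIT_TICK_RATE) / 1000
 *       = delta * PIT_TICK_RATE / (i * 256 * 1000)
 */
delta += (long)(d2 - d1) / 2;		/* centre of the error bars */
delta *= PIT_TICK_RATE;
do_div(delta, i * 256 * 1000);		/* delta now holds kHz */
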
diff --combined drivers/acpi/osl.c
index 1e35f342957c2cf63241433ab30fef0d1cd5459e,2b6c5902825437470d50e7e841a23d3c37b10d61..eb8980d67368e12b45667ddc87cba141bf4024de
@@@ -272,14 -272,21 +272,21 @@@ acpi_os_map_memory(acpi_physical_addres
  }
  EXPORT_SYMBOL_GPL(acpi_os_map_memory);
  
- void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
+ void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
  {
-       if (acpi_gbl_permanent_mmap) {
+       if (acpi_gbl_permanent_mmap)
                iounmap(virt);
-       }
+       else
+               __acpi_unmap_table(virt, size);
  }
  EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
  
+ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
+ {
+       if (!acpi_gbl_permanent_mmap)
+               __acpi_unmap_table(virt, size);
+ }
  #ifdef ACPI_FUTURE_USAGE
  acpi_status
  acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
@@@ -1317,6 -1324,54 +1324,6 @@@ acpi_os_validate_interface (char *inter
        return AE_SUPPORT;
  }
  
 -#ifdef        CONFIG_X86
 -
 -struct aml_port_desc {
 -      uint    start;
 -      uint    end;
 -      char*   name;
 -      char    warned;
 -};
 -
 -static struct aml_port_desc aml_invalid_port_list[] = {
 -      {0x20, 0x21, "PIC0", 0},
 -      {0xA0, 0xA1, "PIC1", 0},
 -      {0x4D0, 0x4D1, "ELCR", 0}
 -};
 -
 -/*
 - * valid_aml_io_address()
 - *
 - * if valid, return true
 - * else invalid, warn once, return false
 - */
 -static bool valid_aml_io_address(uint address, uint length)
 -{
 -      int i;
 -      int entries = sizeof(aml_invalid_port_list) / sizeof(struct aml_port_desc);
 -
 -      for (i = 0; i < entries; ++i) {
 -              if ((address >= aml_invalid_port_list[i].start &&
 -                      address <= aml_invalid_port_list[i].end) ||
 -                      (address + length >= aml_invalid_port_list[i].start &&
 -                      address  + length <= aml_invalid_port_list[i].end))
 -              {
 -                      if (!aml_invalid_port_list[i].warned)
 -                      {
 -                              printk(KERN_ERR "ACPI: Denied BIOS AML access"
 -                                      " to invalid port 0x%x+0x%x (%s)\n",
 -                                      address, length,
 -                                      aml_invalid_port_list[i].name);
 -                              aml_invalid_port_list[i].warned = 1;
 -                      }
 -                      return false;   /* invalid */
 -              }
 -      }
 -      return true;    /* valid */
 -}
 -#else
 -static inline bool valid_aml_io_address(uint address, uint length) { return true; }
 -#endif
  /******************************************************************************
   *
   * FUNCTION:    acpi_os_validate_address
@@@ -1346,6 -1401,8 +1353,6 @@@ acpi_os_validate_address 
  
        switch (space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
 -              if (!valid_aml_io_address(address, length))
 -                      return AE_AML_ILLEGAL_ADDRESS;
        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
                /* Only interference checks against SystemIO and SystemMemory
                   are needed */
diff --combined drivers/pci/intr_remapping.c
index b721c2fbe8f5005c871874cd39c90c5f03ab4d40,8e44db040db7f4dba8cf8c12dc2087012965e16b..9d07a05d26f1125227fa8f37a73ac0c948ed32d2
@@@ -6,6 -6,7 +6,7 @@@
  #include <linux/irq.h>
  #include <asm/io_apic.h>
  #include <asm/smp.h>
+ #include <asm/cpu.h>
  #include <linux/intel-iommu.h>
  #include "intr_remapping.h"
  
@@@ -20,7 -21,7 +21,7 @@@ struct irq_2_iommu 
        u8  irte_mask;
  };
  
 -#ifdef CONFIG_SPARSE_IRQ
 +#ifdef CONFIG_GENERIC_HARDIRQS
  static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
  {
        struct irq_2_iommu *iommu;
diff --combined include/linux/interrupt.h
index 91658d0765982c04ee79e665411c82a17de10cb7,472f11765f608318093ed82919f5c7fdf4f00a7d..0c9cb63e689530cfbc6a060b8a0b07ff2175bd62
  
  typedef irqreturn_t (*irq_handler_t)(int, void *);
  
 +/**
 + * struct irqaction - per interrupt action descriptor
 + * @handler:  interrupt handler function
 + * @flags:    flags (see IRQF_* above)
 + * @mask:     no comment as it is useless and about to be removed
 + * @name:     name of the device
 + * @dev_id:   cookie to identify the device
 + * @next:     pointer to the next irqaction for shared interrupts
 + * @irq:      interrupt number
 + * @dir:      pointer to the proc/irq/NN/name entry
 + */
  struct irqaction {
        irq_handler_t handler;
        unsigned long flags;
@@@ -473,17 -462,12 +473,18 @@@ static inline void init_irq_proc(void
  }
  #endif
  
 +#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
 +extern void debug_poll_all_shared_irqs(void);
 +#else
 +static inline void debug_poll_all_shared_irqs(void) { }
 +#endif
 +
  int show_interrupts(struct seq_file *p, void *v);
  
  struct irq_desc;
  
  extern int early_irq_init(void);
+ extern int arch_probe_nr_irqs(void);
  extern int arch_early_irq_init(void);
  extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
  
diff --combined include/linux/irq.h
index 6db939a575bd4780fe39bb5a91ff7fa3bac08757,27a67536511ef45907a4108c46983fa1977acb44..873e4ac11b813accc72c6699265b039f62e7c575
@@@ -160,10 -160,12 +160,10 @@@ struct irq_2_iommu
   */
  struct irq_desc {
        unsigned int            irq;
 -#ifdef CONFIG_SPARSE_IRQ
        struct timer_rand_state *timer_rand_state;
        unsigned int            *kstat_irqs;
 -# ifdef CONFIG_INTR_REMAP
 +#ifdef CONFIG_INTR_REMAP
        struct irq_2_iommu      *irq_2_iommu;
 -# endif
  #endif
        irq_flow_handler_t      handle_irq;
        struct irq_chip         *chip;
        unsigned int            irqs_unhandled;
        spinlock_t              lock;
  #ifdef CONFIG_SMP
-       cpumask_t               affinity;
+       cpumask_var_t           affinity;
        unsigned int            cpu;
- #endif
  #ifdef CONFIG_GENERIC_PENDING_IRQ
-       cpumask_t               pending_mask;
+       cpumask_var_t           pending_mask;
+ #endif
  #endif
  #ifdef CONFIG_PROC_FS
        struct proc_dir_entry   *dir;
@@@ -200,6 -202,12 +200,6 @@@ extern void arch_free_chip_data(struct 
  extern struct irq_desc irq_desc[NR_IRQS];
  #else /* CONFIG_SPARSE_IRQ */
  extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
 -
 -#define kstat_irqs_this_cpu(DESC) \
 -      ((DESC)->kstat_irqs[smp_processor_id()])
 -#define kstat_incr_irqs_this_cpu(irqno, DESC) \
 -      ((DESC)->kstat_irqs[smp_processor_id()]++)
 -
  #endif /* CONFIG_SPARSE_IRQ */
  
  extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
@@@ -218,6 -226,7 +218,6 @@@ irq_remap_to_desc(unsigned int irq, str
   * Migration helpers for obsolete names, they will go away:
   */
  #define hw_interrupt_type     irq_chip
 -typedef struct irq_chip               hw_irq_controller;
  #define no_irq_type           no_irq_chip
  typedef struct irq_desc               irq_desc_t;
  
  #include <asm/hw_irq.h>
  
  extern int setup_irq(unsigned int irq, struct irqaction *new);
 +extern void remove_irq(unsigned int irq, struct irqaction *act);
  
  #ifdef CONFIG_GENERIC_HARDIRQS
  
@@@ -272,7 -280,7 +272,7 @@@ static inline int irq_balancing_disable
  }
  
  /* Handle irq action chains: */
 -extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
 +extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
  
  /*
   * Built-in IRQ handlers for various IRQ types,
@@@ -317,7 -325,7 +317,7 @@@ static inline void generic_handle_irq(u
  
  /* Handling of unhandled and spurious interrupts: */
  extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
 -                         int action_ret);
 +                         irqreturn_t action_ret);
  
  /* Resending of interrupts :*/
  void check_irq_resend(struct irq_desc *desc, unsigned int irq);
@@@ -414,4 -422,84 +414,84 @@@ extern int set_irq_msi(unsigned int irq
  
  #endif /* !CONFIG_S390 */
  
+ #ifdef CONFIG_SMP
+ /**
+  * init_alloc_desc_masks - allocate cpumasks for irq_desc
+  * @desc:     pointer to irq_desc struct
+  * @cpu:      cpu which will be handling the cpumasks
+  * @boot:     true if need bootmem
+  *
+  * Allocates affinity and pending_mask cpumask if required.
+  * Returns true if successful (or not required).
+  * Side effect: affinity has all bits set, pending_mask has all bits clear.
+  */
+ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+                                                               bool boot)
+ {
+       int node;
+       if (boot) {
+               alloc_bootmem_cpumask_var(&desc->affinity);
+               cpumask_setall(desc->affinity);
+ #ifdef CONFIG_GENERIC_PENDING_IRQ
+               alloc_bootmem_cpumask_var(&desc->pending_mask);
+               cpumask_clear(desc->pending_mask);
+ #endif
+               return true;
+       }
+       node = cpu_to_node(cpu);
+       if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+               return false;
+       cpumask_setall(desc->affinity);
+ #ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+               free_cpumask_var(desc->affinity);
+               return false;
+       }
+       cpumask_clear(desc->pending_mask);
+ #endif
+       return true;
+ }
+ /**
+  * init_copy_desc_masks - copy cpumasks for irq_desc
+  * @old_desc: pointer to old irq_desc struct
+  * @new_desc: pointer to new irq_desc struct
+  *
+  * Ensures affinity and pending_masks are copied to new irq_desc.
+  * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+  * irq_desc struct so the copy is redundant.
+  */
+ static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+ {
+ #ifdef CONFIG_CPUMASKS_OFFSTACK
+       cpumask_copy(new_desc->affinity, old_desc->affinity);
+ #ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+ #endif
+ #endif
+ }
+ #else /* !CONFIG_SMP */
+ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+                                                               bool boot)
+ {
+       return true;
+ }
+ static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+ {
+ }
+ #endif        /* CONFIG_SMP */
  #endif /* _LINUX_IRQ_H */
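
The kernel-doc above spells out the allocation contract for the new per-descriptor cpumasks; the caller side, visible in kernel/irq/handle.c later in this diff, treats a false return as fatal at init time:

/* From the sparse-irq init path below; boot-time callers pass
 * boot=true to allocate from bootmem instead. */
if (!init_alloc_desc_masks(desc, cpu, false)) {
	printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
	BUG_ON(1);
}
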
diff --combined include/linux/irqnr.h
index 52ebbb4b161d46372ad3c53be48b2e14cf61811a,887477bc2ab0841287460aef51517beb2377ea58..ec87b212ff7d03c446bc16b0429baeb86c5d645c
@@@ -20,6 -20,7 +20,7 @@@
  
  # define for_each_irq_desc_reverse(irq, desc)                          \
        for (irq = nr_irqs - 1; irq >= 0; irq--)
  #else /* CONFIG_GENERIC_HARDIRQS */
  
  extern int nr_irqs;
@@@ -28,17 -29,13 +29,17 @@@ extern struct irq_desc *irq_to_desc(uns
  # define for_each_irq_desc(irq, desc)                                 \
        for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs;           \
             irq++, desc = irq_to_desc(irq))                            \
 -              if (desc)
 +              if (!desc)                                              \
 +                      ;                                               \
 +              else
  
  
  # define for_each_irq_desc_reverse(irq, desc)                         \
        for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0;      \
             irq--, desc = irq_to_desc(irq))                            \
 -              if (desc)
 +              if (!desc)                                              \
 +                      ;                                               \
 +              else
  
  #endif /* CONFIG_GENERIC_HARDIRQS */
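
The switch from a trailing "if (desc)" to "if (!desc) ; else" makes the iterators safe under a caller's if/else: the old form left an if with no else at the end of the expansion, so a caller's else could silently bind to the macro rather than to the caller's own if. A hypothetical caller showing the failure mode the new form prevents (probing, try_one_irq and report_done are made-up names):

/* Old macro: the else below bound to the macro's hidden "if (desc)",
 * running report_done() once per NULL descriptor. New macro: it binds
 * to "if (probing)" as intended. */
if (probing)
	for_each_irq_desc(irq, desc)
		try_one_irq(irq, desc);
else
	report_done();
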
  
diff --combined kernel/irq/chip.c
index 03d0bed2b8d925492a16e84951772330fccdcbd8,122fef4b0bd30d82ade4af7a84f281d019030280..c687ba4363f2b4a95a5c3988998286a1e8ab699b
@@@ -46,7 -46,10 +46,10 @@@ void dynamic_irq_init(unsigned int irq
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
  #ifdef CONFIG_SMP
-       cpumask_setall(&desc->affinity);
+       cpumask_setall(desc->affinity);
+ #ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_clear(desc->pending_mask);
+ #endif
  #endif
        spin_unlock_irqrestore(&desc->lock, flags);
  }
@@@ -78,7 -81,6 +81,7 @@@ void dynamic_irq_cleanup(unsigned int i
        desc->handle_irq = handle_bad_irq;
        desc->chip = &no_irq_chip;
        desc->name = NULL;
 +      clear_kstat_irqs(desc);
        spin_unlock_irqrestore(&desc->lock, flags);
  }
  
@@@ -291,8 -293,7 +294,8 @@@ static inline void mask_ack_irq(struct 
                desc->chip->mask_ack(irq);
        else {
                desc->chip->mask(irq);
 -              desc->chip->ack(irq);
 +              if (desc->chip->ack)
 +                      desc->chip->ack(irq);
        }
  }
  
@@@ -478,8 -479,7 +481,8 @@@ handle_edge_irq(unsigned int irq, struc
        kstat_incr_irqs_this_cpu(irq, desc);
  
        /* Start handling the irq */
 -      desc->chip->ack(irq);
 +      if (desc->chip->ack)
 +              desc->chip->ack(irq);
        desc = irq_remap_to_desc(irq, desc);
  
        /* Mark the IRQ currently in progress.*/
diff --combined kernel/irq/handle.c
index f6cdda68e5c6c04b4c0da4626db67d05a78af3b0,f51eaee921b603b202bf184cdfdaee3a8da2ca08..9ebf77968871550a365713d7a29a6f131f231ee5
@@@ -17,6 -17,7 +17,7 @@@
  #include <linux/kernel_stat.h>
  #include <linux/rculist.h>
  #include <linux/hash.h>
+ #include <linux/bootmem.h>
  
  #include "internals.h"
  
@@@ -69,6 -70,7 +70,7 @@@ int nr_irqs = NR_IRQS
  EXPORT_SYMBOL_GPL(nr_irqs);
  
  #ifdef CONFIG_SPARSE_IRQ
  static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
- #ifdef CONFIG_SMP
-       .affinity   = CPU_MASK_ALL
- #endif
  };
  
  void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
  {
 -      unsigned long bytes;
 -      char *ptr;
        int node;
 -
 -      /* Compute how many bytes we need per irq and allocate them */
 -      bytes = nr * sizeof(unsigned int);
 +      void *ptr;
  
        node = cpu_to_node(cpu);
 -      ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
 -      printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);
 +      ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
  
 -      if (ptr)
 -              desc->kstat_irqs = (unsigned int *)ptr;
 +      /*
 +       * don't overwrite if we cannot get a new one
 +       * init_copy_kstat_irqs() could still use old one
 +       */
 +      if (ptr) {
 +              printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
 +                       cpu, node);
 +              desc->kstat_irqs = ptr;
 +      }
  }
  
  static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+               BUG_ON(1);
+       }
        arch_init_chip_data(desc, cpu);
  }
  
   */
  DEFINE_SPINLOCK(sparse_irq_lock);
  
- struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+ struct irq_desc **irq_desc_ptrs __read_mostly;
  
  static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
- #ifdef CONFIG_SMP
-               .affinity   = CPU_MASK_ALL
- #endif
        }
  };
  
- /* FIXME: use bootmem alloc ...*/
- static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+ static unsigned int *kstat_irqs_legacy;
  
  int __init early_irq_init(void)
  {
  
        init_irq_default_affinity();
  
+        /* initialize nr_irqs based on nr_cpu_ids */
+       arch_probe_nr_irqs();
+       printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
  
+       /* allocate irq_desc_ptrs array based on nr_irqs */
+       irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+       /* allocate based on nr_cpu_ids */
+       /* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
+       kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+                                         sizeof(int));
        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
-               desc[i].kstat_irqs = kstat_irqs_legacy[i];
+               desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+               init_alloc_desc_masks(&desc[i], 0, true);
                irq_desc_ptrs[i] = desc + i;
        }
  
-       for (i = legacy_count; i < NR_IRQS; i++)
+       for (i = legacy_count; i < nr_irqs; i++)
                irq_desc_ptrs[i] = NULL;
  
        return arch_early_irq_init();
  
  struct irq_desc *irq_to_desc(unsigned int irq)
  {
-       return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+       if (irq_desc_ptrs && irq < nr_irqs)
+               return irq_desc_ptrs[irq];
+       return NULL;
  }
  
  struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
        unsigned long flags;
        int node;
  
-       if (irq >= NR_IRQS) {
-               printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-                               irq, NR_IRQS);
-               WARN_ON(1);
+       if (irq >= nr_irqs) {
+               WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+                       irq, nr_irqs);
                return NULL;
        }
  
@@@ -223,13 -234,9 +236,10 @@@ struct irq_desc irq_desc[NR_IRQS] __cac
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
- #ifdef CONFIG_SMP
-               .affinity = CPU_MASK_ALL
- #endif
        }
  };
  
 +static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
  int __init early_irq_init(void)
  {
        struct irq_desc *desc;
  
        init_irq_default_affinity();
  
+       printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);
  
        for (i = 0; i < count; i++) {
                desc[i].irq = i;
+               init_alloc_desc_masks(&desc[i], 0, true);
 +              desc[i].kstat_irqs = kstat_irqs_all[i];
        }
        return arch_early_irq_init();
  }
  
@@@ -260,11 -268,6 +272,11 @@@ struct irq_desc *irq_to_desc_alloc_cpu(
  }
  #endif /* !CONFIG_SPARSE_IRQ */
  
 +void clear_kstat_irqs(struct irq_desc *desc)
 +{
 +      memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
 +}
 +
  /*
   * What should we do if we get a hw irq event on an illegal vector?
   * Each architecture has to answer this themself.
@@@ -338,8 -341,6 +350,8 @@@ irqreturn_t handle_IRQ_event(unsigned i
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;
  
 +      WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
 +
        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();
  
  }
  
  #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
 +
 +#ifdef CONFIG_ENABLE_WARN_DEPRECATED
 +# warning __do_IRQ is deprecated. Please convert to proper flow handlers
 +#endif
 +
  /**
   * __do_IRQ - original all in one highlevel IRQ handler
   * @irq:      the interrupt number
@@@ -484,10 -480,12 +496,10 @@@ void early_init_irq_lock_class(void
        }
  }
  
 -#ifdef CONFIG_SPARSE_IRQ
  unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
  {
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
  }
 -#endif
  EXPORT_SYMBOL(kstat_irqs_cpu);
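
With SPARSE_IRQ the descriptor table is no longer a static NR_IRQS-sized array: irq_desc_ptrs is bootmem-allocated in early_irq_init() once arch_probe_nr_irqs() has sized nr_irqs, so lookups must also tolerate being called before that allocation exists:

struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < nr_irqs)	/* NULL until early_irq_init() */
		return irq_desc_ptrs[irq];
	return NULL;
}
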
  
diff --combined kernel/irq/internals.h
index b60950bf5a16fb57286d95515bb0ceaa2022410a,40416a81a0f5af98b159bf675e3fd18e41648ce1..ee1aa9f8e8b9341af0c81ec9073f52e48375eb5b
@@@ -15,9 -15,15 +15,16 @@@ extern int __irq_set_trigger(struct irq
  
  extern struct lock_class_key irq_desc_lock_class;
  extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 +extern void clear_kstat_irqs(struct irq_desc *desc);
  extern spinlock_t sparse_irq_lock;
+ #ifdef CONFIG_SPARSE_IRQ
+ /* irq_desc_ptrs allocated at boot time */
+ extern struct irq_desc **irq_desc_ptrs;
+ #else
+ /* irq_desc_ptrs is a fixed size array */
  extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+ #endif
  
  #ifdef CONFIG_PROC_FS
  extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
diff --combined kernel/irq/manage.c
index ea119effe096b47628c2b8061c2e76e8fcfb5d43,a3a5dc9ef346d813edf3b971926cb649d9c89d67..6458e99984c08f3af7a108ac0f7db03b21a7e5e8
@@@ -90,14 -90,14 +90,14 @@@ int irq_set_affinity(unsigned int irq, 
  
  #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               cpumask_copy(&desc->affinity, cpumask);
+               cpumask_copy(desc->affinity, cpumask);
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, cpumask);
+               cpumask_copy(desc->pending_mask, cpumask);
        }
  #else
-       cpumask_copy(&desc->affinity, cpumask);
+       cpumask_copy(desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
  #endif
        desc->status |= IRQ_AFFINITY_SET;
  /*
   * Generic version of the affinity autoselector.
   */
 -int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 +static int setup_affinity(unsigned int irq, struct irq_desc *desc)
  {
        if (!irq_can_set_affinity(irq))
                return 0;
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+               if (cpumask_any_and(desc->affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
  
-       cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+       cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
  set_affinity:
-       desc->chip->set_affinity(irq, &desc->affinity);
+       desc->chip->set_affinity(irq, desc->affinity);
  
        return 0;
  }
  #else
 -static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
 +static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
  {
        return irq_select_affinity(irq);
  }
@@@ -149,14 -149,14 +149,14 @@@ int irq_select_affinity_usr(unsigned in
        int ret;
  
        spin_lock_irqsave(&desc->lock, flags);
 -      ret = do_irq_select_affinity(irq, desc);
 +      ret = setup_affinity(irq, desc);
        spin_unlock_irqrestore(&desc->lock, flags);
  
        return ret;
  }
  
  #else
 -static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
 +static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
  {
        return 0;
  }
@@@ -389,9 -389,9 +389,9 @@@ int __irq_set_trigger(struct irq_desc *
   * allocate special interrupts that are part of the architecture.
   */
  static int
 -__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 +__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
  {
 -      struct irqaction *old, **p;
 +      struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags;
        int shared = 0;
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock, flags);
 -      p = &desc->action;
 -      old = *p;
 +      old_ptr = &desc->action;
 +      old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
  
                /* add new interrupt at end of irq queue */
                do {
 -                      p = &old->next;
 -                      old = *p;
 +                      old_ptr = &old->next;
 +                      old = *old_ptr;
                } while (old);
                shared = 1;
        }
                        desc->status |= IRQ_NO_BALANCING;
  
                /* Set default affinity mask once everything is setup */
 -              do_irq_select_affinity(irq, desc);
 +              setup_affinity(irq, desc);
  
        } else if ((new->flags & IRQF_TRIGGER_MASK)
                        && (new->flags & IRQF_TRIGGER_MASK)
                                (int)(new->flags & IRQF_TRIGGER_MASK));
        }
  
 -      *p = new;
 +      *old_ptr = new;
  
        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
@@@ -549,117 -549,90 +549,117 @@@ int setup_irq(unsigned int irq, struct 
  
        return __setup_irq(irq, desc, act);
  }
 +EXPORT_SYMBOL_GPL(setup_irq);
  
 -/**
 - *    free_irq - free an interrupt
 - *    @irq: Interrupt line to free
 - *    @dev_id: Device identity to free
 - *
 - *    Remove an interrupt handler. The handler is removed and if the
 - *    interrupt line is no longer in use by any driver it is disabled.
 - *    On a shared IRQ the caller must ensure the interrupt is disabled
 - *    on the card it drives before calling this function. The function
 - *    does not return until any executing interrupts for this IRQ
 - *    have completed.
 - *
 - *    This function must not be called from interrupt context.
 + /*
 + * Internal function to unregister an irqaction - used to free
 + * regular and special interrupts that are part of the architecture.
   */
 -void free_irq(unsigned int irq, void *dev_id)
 +static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
  {
        struct irq_desc *desc = irq_to_desc(irq);
 -      struct irqaction **p;
 +      struct irqaction *action, **action_ptr;
        unsigned long flags;
  
 -      WARN_ON(in_interrupt());
 +      WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
  
        if (!desc)
 -              return;
 +              return NULL;
  
        spin_lock_irqsave(&desc->lock, flags);
 -      p = &desc->action;
 +
 +      /*
 +       * There can be multiple actions per IRQ descriptor, find the right
 +       * one based on the dev_id:
 +       */
 +      action_ptr = &desc->action;
        for (;;) {
 -              struct irqaction *action = *p;
 +              action = *action_ptr;
  
 -              if (action) {
 -                      struct irqaction **pp = p;
 +              if (!action) {
 +                      WARN(1, "Trying to free already-free IRQ %d\n", irq);
 +                      spin_unlock_irqrestore(&desc->lock, flags);
  
 -                      p = &action->next;
 -                      if (action->dev_id != dev_id)
 -                              continue;
 +                      return NULL;
 +              }
  
 -                      /* Found it - now remove it from the list of entries */
 -                      *pp = action->next;
 +              if (action->dev_id == dev_id)
 +                      break;
 +              action_ptr = &action->next;
 +      }
  
 -                      /* Currently used only by UML, might disappear one day.*/
 +      /* Found it - now remove it from the list of entries: */
 +      *action_ptr = action->next;
 +
 +      /* Currently used only by UML, might disappear one day: */
  #ifdef CONFIG_IRQ_RELEASE_METHOD
 -                      if (desc->chip->release)
 -                              desc->chip->release(irq, dev_id);
 +      if (desc->chip->release)
 +              desc->chip->release(irq, dev_id);
  #endif
  
 -                      if (!desc->action) {
 -                              desc->status |= IRQ_DISABLED;
 -                              if (desc->chip->shutdown)
 -                                      desc->chip->shutdown(irq);
 -                              else
 -                                      desc->chip->disable(irq);
 -                      }
 -                      spin_unlock_irqrestore(&desc->lock, flags);
 -                      unregister_handler_proc(irq, action);
 +      /* If this was the last handler, shut down the IRQ line: */
 +      if (!desc->action) {
 +              desc->status |= IRQ_DISABLED;
 +              if (desc->chip->shutdown)
 +                      desc->chip->shutdown(irq);
 +              else
 +                      desc->chip->disable(irq);
 +      }
 +      spin_unlock_irqrestore(&desc->lock, flags);
 +
 +      unregister_handler_proc(irq, action);
 +
 +      /* Make sure it's not being used on another CPU: */
 +      synchronize_irq(irq);
  
 -                      /* Make sure it's not being used on another CPU */
 -                      synchronize_irq(irq);
 -#ifdef CONFIG_DEBUG_SHIRQ
 -                      /*
 -                       * It's a shared IRQ -- the driver ought to be
 -                       * prepared for it to happen even now it's
 -                       * being freed, so let's make sure....  We do
 -                       * this after actually deregistering it, to
 -                       * make sure that a 'real' IRQ doesn't run in
 -                       * parallel with our fake
 -                       */
 -                      if (action->flags & IRQF_SHARED) {
 -                              local_irq_save(flags);
 -                              action->handler(irq, dev_id);
 -                              local_irq_restore(flags);
 -                      }
 -#endif
 -                      kfree(action);
 -                      return;
 -              }
 -              printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
  #ifdef CONFIG_DEBUG_SHIRQ
 -              dump_stack();
 -#endif
 -              spin_unlock_irqrestore(&desc->lock, flags);
 -              return;
 +      /*
 +       * It's a shared IRQ -- the driver ought to be prepared for an IRQ
 +       * event to happen even now it's being freed, so let's make sure that
 +       * is so by doing an extra call to the handler ....
 +       *
 +       * ( We do this after actually deregistering it, to make sure that a
 +       *   'real' IRQ doesn't run in parallel with our fake. )
 +       */
 +      if (action->flags & IRQF_SHARED) {
 +              local_irq_save(flags);
 +              action->handler(irq, dev_id);
 +              local_irq_restore(flags);
        }
 +#endif
 +      return action;
 +}
 +
 +/**
 + *    remove_irq - free an interrupt
 + *    @irq: Interrupt line to free
 + *    @act: irqaction for the interrupt
 + *
 + * Used to remove interrupts statically setup by the early boot process.
 + */
 +void remove_irq(unsigned int irq, struct irqaction *act)
 +{
 +      __free_irq(irq, act->dev_id);
 +}
 +EXPORT_SYMBOL_GPL(remove_irq);
 +
 +/**
 + *    free_irq - free an interrupt allocated with request_irq
 + *    @irq: Interrupt line to free
 + *    @dev_id: Device identity to free
 + *
 + *    Remove an interrupt handler. The handler is removed and if the
 + *    interrupt line is no longer in use by any driver it is disabled.
 + *    On a shared IRQ the caller must ensure the interrupt is disabled
 + *    on the card it drives before calling this function. The function
 + *    does not return until any executing interrupts for this IRQ
 + *    have completed.
 + *
 + *    This function must not be called from interrupt context.
 + */
 +void free_irq(unsigned int irq, void *dev_id)
 +{
 +      kfree(__free_irq(irq, dev_id));
  }
  EXPORT_SYMBOL(free_irq);
  
@@@ -706,12 -679,11 +706,12 @@@ int request_irq(unsigned int irq, irq_h
         * the behavior is classified as "will not fix" so we need to
         * start nudging drivers away from using that idiom.
         */
 -      if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
 -                      == (IRQF_SHARED|IRQF_DISABLED))
 -              pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
 -                              "guaranteed on shared IRQs\n",
 -                              irq, devname);
 +      if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
 +                                      (IRQF_SHARED|IRQF_DISABLED)) {
 +              pr_warning(
 +                "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
 +                      irq, devname);
 +      }
  
  #ifdef CONFIG_LOCKDEP
        /*
        if (!handler)
                return -EINVAL;
  
 -      action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
 +      action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;
  
        action->handler = handler;
        action->flags = irqflags;
 -      cpus_clear(action->mask);
        action->name = devname;
 -      action->next = NULL;
        action->dev_id = dev_id;
  
        retval = __setup_irq(irq, desc, action);
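
The free_irq() rework above splits the old body into __free_irq(), which unlinks and returns the matching irqaction, plus two wrappers: free_irq() kfree()s the action it gets back (matching the kzalloc() in request_irq()), while the new remove_irq() only unlinks, for actions that were statically allocated and registered with setup_irq(). Usage sketch (TIMER_IRQ, my_timer_handler and the action are hypothetical):

static struct irqaction my_timer_action = {
	.handler = my_timer_handler,
	.name	 = "timer",
};

/* Early boot: register the statically allocated action. */
setup_irq(TIMER_IRQ, &my_timer_action);

/* Teardown: unlink it; unlike free_irq(), nothing is kfree()d. */
remove_irq(TIMER_IRQ, &my_timer_action);
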
diff --combined kernel/irq/numa_migrate.c
index aef18ab6b75bf1954f23ccff4a52a948d62b74d2,7f9b80434e32a295b463bc7aaa97457207d56686..243d6121e50e08c1b54972fd3bb61c827a8c3dfe
@@@ -17,11 -17,16 +17,11 @@@ static void init_copy_kstat_irqs(struc
                                 struct irq_desc *desc,
                                 int cpu, int nr)
  {
 -      unsigned long bytes;
 -
        init_kstat_irqs(desc, cpu, nr);
  
 -      if (desc->kstat_irqs != old_desc->kstat_irqs) {
 -              /* Compute how many bytes we need per irq and allocate them */
 -              bytes = nr * sizeof(unsigned int);
 -
 -              memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
 -      }
 +      if (desc->kstat_irqs != old_desc->kstat_irqs)
 +              memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
 +                       nr * sizeof(*desc->kstat_irqs));
  }
  
  static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
        old_desc->kstat_irqs = NULL;
  }
  
- static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
                 struct irq_desc *desc, int cpu)
  {
        memcpy(desc, old_desc, sizeof(struct irq_desc));
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+                               "for migration.\n", irq);
+               return false;
+       }
        spin_lock_init(&desc->lock);
        desc->cpu = cpu;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+       init_copy_desc_masks(old_desc, desc);
        arch_init_copy_chip_data(old_desc, desc, cpu);
+       return true;
  }
  
  static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@@ -71,12 -83,18 +78,18 @@@ static struct irq_desc *__real_move_irq
        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        if (!desc) {
-               printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+               printk(KERN_ERR "irq %d: can not get new irq_desc "
+                               "for migration.\n", irq);
+               /* still use old one */
+               desc = old_desc;
+               goto out_unlock;
+       }
+       if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
                /* still use old one */
+               kfree(desc);
                desc = old_desc;
                goto out_unlock;
        }
-       init_copy_one_irq_desc(irq, old_desc, desc, cpu);
  
        irq_desc_ptrs[irq] = desc;
        spin_unlock_irqrestore(&sparse_irq_lock, flags);
diff --combined kernel/module.c
index 1196f5d117002face7e8424f87024e94df9e8add,f0e04d6b67d8cb325526dce1fdb456265ba3f926..29f2d7b33dd4ba388a8c681f5a65f925582ff441
@@@ -51,6 -51,7 +51,7 @@@
  #include <linux/tracepoint.h>
  #include <linux/ftrace.h>
  #include <linux/async.h>
+ #include <linux/percpu.h>
  
  #if 0
  #define DEBUGP printk
@@@ -366,6 -367,34 +367,34 @@@ static struct module *find_module(cons
  }
  
  #ifdef CONFIG_SMP
+ #ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ static void *percpu_modalloc(unsigned long size, unsigned long align,
+                            const char *name)
+ {
+       void *ptr;
+       if (align > PAGE_SIZE) {
+               printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+                      name, align, PAGE_SIZE);
+               align = PAGE_SIZE;
+       }
+       ptr = __alloc_reserved_percpu(size, align);
+       if (!ptr)
+               printk(KERN_WARNING
+                      "Could not allocate %lu bytes percpu data\n", size);
+       return ptr;
+ }
+ static void percpu_modfree(void *freeme)
+ {
+       free_percpu(freeme);
+ }
+ #else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
  /* Number of blocks used and allocated. */
  static unsigned int pcpu_num_used, pcpu_num_allocated;
  /* Size of each block.  -ve means used. */
@@@ -480,21 -509,6 +509,6 @@@ static void percpu_modfree(void *freeme
        }
  }
  
- static unsigned int find_pcpusec(Elf_Ehdr *hdr,
-                                Elf_Shdr *sechdrs,
-                                const char *secstrings)
- {
-       return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
- }
- static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
- {
-       int cpu;
-       for_each_possible_cpu(cpu)
-               memcpy(pcpudest + per_cpu_offset(cpu), from, size);
- }
  static int percpu_modinit(void)
  {
        pcpu_num_used = 2;
        return 0;
  }
  __initcall(percpu_modinit);
+ #endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+ static unsigned int find_pcpusec(Elf_Ehdr *hdr,
+                                Elf_Shdr *sechdrs,
+                                const char *secstrings)
+ {
+       return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
+ }
+ static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+ {
+       int cpu;
+       for_each_possible_cpu(cpu)
+               memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+ }
  #else /* ... !CONFIG_SMP */
  static inline void *percpu_modalloc(unsigned long size, unsigned long align,
                                    const char *name)
  {
@@@ -535,6 -568,7 +568,7 @@@ static inline void percpu_modcopy(void 
        /* pcpusec should be 0, and size of that section should be 0. */
        BUG_ON(size != 0);
  }
  #endif /* CONFIG_SMP */
  
  #define MODINFO_ATTR(field)   \
@@@ -2015,6 -2049,14 +2049,6 @@@ static noinline struct module *load_mod
        if (err < 0)
                goto free_mod;
  
 -#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
 -      mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
 -                                    mod->name);
 -      if (!mod->refptr) {
 -              err = -ENOMEM;
 -              goto free_mod;
 -      }
 -#endif
        if (pcpuindex) {
                /* We have a special allocation for this section. */
                percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
                                         mod->name);
                if (!percpu) {
                        err = -ENOMEM;
 -                      goto free_percpu;
 +                      goto free_mod;
                }
                sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
                mod->percpu = percpu;
        /* Module has been moved. */
        mod = (void *)sechdrs[modindex].sh_addr;
  
 +#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
 +      mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
 +                                    mod->name);
 +      if (!mod->refptr) {
 +              err = -ENOMEM;
 +              goto free_init;
 +      }
 +#endif
        /* Now we've moved module, initialize linked lists, etc. */
        module_unload_init(mod);
  
        ftrace_release(mod->module_core, mod->core_size);
   free_unload:
        module_unload_free(mod);
 + free_init:
 +#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
 +      percpu_modfree(mod->refptr);
 +#endif
        module_free(mod, mod->module_init);
   free_core:
        module_free(mod, mod->module_core);
 +      /* mod will be freed with core. Don't access it beyond this line! */
   free_percpu:
        if (percpu)
                percpu_modfree(percpu);
 -#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
 -      percpu_modfree(mod->refptr);
 -#endif
   free_mod:
        kfree(args);
   free_hdr:
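
With CONFIG_HAVE_DYNAMIC_PER_CPU_AREA, module per-cpu sections now come from the reserved first percpu chunk via __alloc_reserved_percpu() rather than the old block allocator, which is kept as the fallback path. The load_module() hunks above show the caller pattern; condensed:

/* Per-CPU reference-count storage for the module being loaded. */
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
			      mod->name);
if (!mod->refptr) {
	err = -ENOMEM;
	goto free_init;
}
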
diff --combined mm/vmscan.c
index 56ddf41149eb77a55158ced628c02f817b990618,592bb9619f75ace42bb83aea694185000d499966..1cdbf0b057278e8f157da93bb6eda0dc80644387
@@@ -1262,6 -1262,7 +1262,6 @@@ static void shrink_active_list(unsigne
         * Move the pages to the [file or anon] inactive list.
         */
        pagevec_init(&pvec, 1);
 -      pgmoved = 0;
        lru = LRU_BASE + file * LRU_FILE;
  
        spin_lock_irq(&zone->lru_lock);
         */
        reclaim_stat->recent_rotated[!!file] += pgmoved;
  
 +      pgmoved = 0;
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
@@@ -1963,7 -1963,7 +1963,7 @@@ static int kswapd(void *p
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
-       node_to_cpumask_ptr(cpumask, pgdat->node_id);
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
  
        if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);
@@@ -2198,7 -2198,9 +2198,9 @@@ static int __devinit cpu_callback(struc
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_node_state(nid, N_HIGH_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
-                       node_to_cpumask_ptr(mask, pgdat->node_id);
+                       const struct cpumask *mask;
+                       mask = cpumask_of_node(pgdat->node_id);
  
                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
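
Both vmscan hunks are the same conversion: node_to_cpumask_ptr() could involve an on-stack copy of the node's mask, while cpumask_of_node() returns a const pointer into the topology tables, avoiding the copy and any on-stack cpumask_t. The kswapd node binding then reads:

const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

if (!cpumask_empty(cpumask))
	set_cpus_allowed_ptr(tsk, cpumask);	/* pin kswapd to its node */
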