www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 10 Jan 2009 14:13:09 +0000 (06:13 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 10 Jan 2009 14:13:09 +0000 (06:13 -0800)
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (36 commits)
  x86: fix section mismatch warnings in mcheck/mce_amd_64.c
  x86: offer frame pointers in all build modes
  x86: remove duplicated #include's
  x86: k8 numa register active regions later
  x86: update Alan Cox's email addresses
  x86: rename all fields of mpc_table mpc_X to X
  x86: rename all fields of mpc_oemtable oem_X to X
  x86: rename all fields of mpc_bus mpc_X to X
  x86: rename all fields of mpc_cpu mpc_X to X
  x86: rename all fields of mpc_intsrc mpc_X to X
  x86: rename all fields of mpc_lintsrc mpc_X to X
  x86: rename all fields of mpc_iopic mpc_X to X
  x86: irqinit_64.c init_ISA_irqs should be static
  Documentation/x86/boot.txt: payload length was changed to payload_length
  x86: setup_percpu.c fix style problems
  x86: irqinit_64.c fix style problems
  x86: irqinit_32.c fix style problems
  x86: i8259.c fix style problems
  x86: irq_32.c fix style problems
  x86: ioport.c fix style problems
  ...

1  2 
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
lib/Kconfig.debug

index aa55764602b1576d34e49638163616391da9a4d6,e4433bf44209a032864a9232aabd49535e3dde18..55c46074eba0fca7738f047d8e90e5ac46c89501
@@@ -5,12 -5,11 +5,11 @@@
  #include <linux/percpu.h>
  #include <linux/kexec.h>
  #include <linux/crash_dump.h>
- #include <asm/smp.h>
- #include <asm/percpu.h>
+ #include <linux/smp.h>
+ #include <linux/topology.h>
  #include <asm/sections.h>
  #include <asm/processor.h>
  #include <asm/setup.h>
- #include <asm/topology.h>
  #include <asm/mpspec.h>
  #include <asm/apicdef.h>
  #include <asm/highmem.h>
@@@ -20,8 -19,8 +19,8 @@@ unsigned int num_processors
  unsigned disabled_cpus __cpuinitdata;
  /* Processor that is doing the boot up */
  unsigned int boot_cpu_physical_apicid = -1U;
- unsigned int max_physical_apicid;
  EXPORT_SYMBOL(boot_cpu_physical_apicid);
+ unsigned int max_physical_apicid;
  
  /* Bitmask of physically existing CPUs */
  physid_mask_t phys_cpu_present_map;
@@@ -131,27 -130,7 +130,27 @@@ static void __init setup_cpu_pda_map(vo
        /* point to new pointer table */
        _cpu_pda = new_cpu_pda;
  }
 -#endif
 +
 +#endif /* CONFIG_SMP && CONFIG_X86_64 */
 +
 +#ifdef CONFIG_X86_64
 +
 +/* correctly size the local cpu masks */
 +static void setup_cpu_local_masks(void)
 +{
 +      alloc_bootmem_cpumask_var(&cpu_initialized_mask);
 +      alloc_bootmem_cpumask_var(&cpu_callin_mask);
 +      alloc_bootmem_cpumask_var(&cpu_callout_mask);
 +      alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 +}
 +
 +#else /* CONFIG_X86_32 */
 +
 +static inline void setup_cpu_local_masks(void)
 +{
 +}
 +
 +#endif /* CONFIG_X86_32 */
  
  /*
   * Great future plan:
@@@ -207,9 -186,6 +206,9 @@@ void __init setup_per_cpu_areas(void
  
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();
 +
 +      /* Setup cpu initialized, callin, callout masks */
 +      setup_cpu_local_masks();
  }
  
  #endif
@@@ -303,8 -279,8 +302,8 @@@ static void __cpuinit numa_set_cpumask(
  
        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-               enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
+               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
  }
  
  void __cpuinit numa_add_cpu(int cpu)
  {
diff --combined arch/x86/kernel/smp.c
index 182135ba1eaf7cf5f3f8fa3106e8d96146cbb75f,cf1f075886b40a6773dc8f1ebe713839bc5ca1ee..e6faa3316bd2e04311313ce4c7c68ef3438ef274
@@@ -1,7 -1,7 +1,7 @@@
  /*
   *    Intel SMP support routines.
   *
-  *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+  *    (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
   *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
   *      (c) 2002,2003 Andi Kleen, SuSE Labs.
   *
@@@ -128,23 -128,16 +128,23 @@@ void native_send_call_func_single_ipi(i
  
  void native_send_call_func_ipi(const struct cpumask *mask)
  {
 -      cpumask_t allbutself;
 +      cpumask_var_t allbutself;
  
 -      allbutself = cpu_online_map;
 -      cpu_clear(smp_processor_id(), allbutself);
 +      if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
 +              send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 +              return;
 +      }
  
 -      if (cpus_equal(*mask, allbutself) &&
 -          cpus_equal(cpu_online_map, cpu_callout_map))
 +      cpumask_copy(allbutself, cpu_online_mask);
 +      cpumask_clear_cpu(smp_processor_id(), allbutself);
 +
 +      if (cpumask_equal(mask, allbutself) &&
 +          cpumask_equal(cpu_online_mask, cpu_callout_mask))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 +
 +      free_cpumask_var(allbutself);
  }
  
  /*
index 00e17e58948232f1b2d977c52517fd0802187d30,07576bee03ef420cf6492682f216ded903f4ea5f..bb1a3b1fc87faeb43baa876f725a3454673abcdc
@@@ -1,7 -1,7 +1,7 @@@
  /*
   *    x86 SMP booting functions
   *
-  *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+  *    (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
   *    (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
   *    Copyright 2001 Andi Kleen, SuSE Labs.
   *
@@@ -102,6 -102,9 +102,6 @@@ EXPORT_SYMBOL(smp_num_siblings)
  /* Last level cache ID of each logical CPU */
  DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
  
 -cpumask_t cpu_callin_map;
 -cpumask_t cpu_callout_map;
 -
  /* representing HT siblings of each logical CPU */
  DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
  EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@@ -117,6 -120,9 +117,6 @@@ EXPORT_PER_CPU_SYMBOL(cpu_info)
  static atomic_t init_deasserted;
  
  
 -/* representing cpus for which sibling maps can be computed */
 -static cpumask_t cpu_sibling_setup_map;
 -
  /* Set if we find a B stepping CPU */
  static int __cpuinitdata smp_b_stepping;
  
@@@ -134,7 -140,7 +134,7 @@@ EXPORT_SYMBOL(cpu_to_node_map)
  static void map_cpu_to_node(int cpu, int node)
  {
        printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
 -      cpu_set(cpu, node_to_cpumask_map[node]);
 +      cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = node;
  }
  
@@@ -145,7 -151,7 +145,7 @@@ static void unmap_cpu_to_node(int cpu
  
        printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
 -              cpu_clear(cpu, node_to_cpumask_map[node]);
 +              cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = 0;
  }
  #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@@ -203,7 -209,7 +203,7 @@@ static void __cpuinit smp_callin(void
         */
        phys_id = read_apic_id();
        cpuid = smp_processor_id();
 -      if (cpu_isset(cpuid, cpu_callin_map)) {
 +      if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
                panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
                                        phys_id, cpuid);
        }
                /*
                 * Has the boot CPU finished it's STARTUP sequence?
                 */
 -              if (cpu_isset(cpuid, cpu_callout_map))
 +              if (cpumask_test_cpu(cpuid, cpu_callout_mask))
                        break;
                cpu_relax();
        }
        /*
         * Allow the master to continue.
         */
 -      cpu_set(cpuid, cpu_callin_map);
 +      cpumask_set_cpu(cpuid, cpu_callin_mask);
  }
  
  static int __cpuinitdata unsafe_smp;
@@@ -326,7 -332,7 +326,7 @@@ notrace static void __cpuinit start_sec
        ipi_call_lock();
        lock_vector_lock();
        __setup_vector_irq(smp_processor_id());
 -      cpu_set(smp_processor_id(), cpu_online_map);
 +      set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
        ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@@ -432,52 -438,50 +432,52 @@@ void __cpuinit set_cpu_sibling_map(int 
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
  
 -      cpu_set(cpu, cpu_sibling_setup_map);
 +      cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
  
        if (smp_num_siblings > 1) {
 -              for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 -                      if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
 -                          c->cpu_core_id == cpu_data(i).cpu_core_id) {
 -                              cpu_set(i, per_cpu(cpu_sibling_map, cpu));
 -                              cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 -                              cpu_set(i, per_cpu(cpu_core_map, cpu));
 -                              cpu_set(cpu, per_cpu(cpu_core_map, i));
 -                              cpu_set(i, c->llc_shared_map);
 -                              cpu_set(cpu, cpu_data(i).llc_shared_map);
 +              for_each_cpu(i, cpu_sibling_setup_mask) {
 +                      struct cpuinfo_x86 *o = &cpu_data(i);
 +
 +                      if (c->phys_proc_id == o->phys_proc_id &&
 +                          c->cpu_core_id == o->cpu_core_id) {
 +                              cpumask_set_cpu(i, cpu_sibling_mask(cpu));
 +                              cpumask_set_cpu(cpu, cpu_sibling_mask(i));
 +                              cpumask_set_cpu(i, cpu_core_mask(cpu));
 +                              cpumask_set_cpu(cpu, cpu_core_mask(i));
 +                              cpumask_set_cpu(i, &c->llc_shared_map);
 +                              cpumask_set_cpu(cpu, &o->llc_shared_map);
                        }
                }
        } else {
 -              cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 +              cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
        }
  
 -      cpu_set(cpu, c->llc_shared_map);
 +      cpumask_set_cpu(cpu, &c->llc_shared_map);
  
        if (current_cpu_data.x86_max_cores == 1) {
 -              per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
 +              cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
                c->booted_cores = 1;
                return;
        }
  
 -      for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
 +      for_each_cpu(i, cpu_sibling_setup_mask) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 -                      cpu_set(i, c->llc_shared_map);
 -                      cpu_set(cpu, cpu_data(i).llc_shared_map);
 +                      cpumask_set_cpu(i, &c->llc_shared_map);
 +                      cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 -                      cpu_set(i, per_cpu(cpu_core_map, cpu));
 -                      cpu_set(cpu, per_cpu(cpu_core_map, i));
 +                      cpumask_set_cpu(i, cpu_core_mask(cpu));
 +                      cpumask_set_cpu(cpu, cpu_core_mask(i));
                        /*
                         *  Does this new cpu bringup a new core?
                         */
 -                      if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
 +                      if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
 -                              if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
 +                              if (cpumask_first(cpu_sibling_mask(i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
@@@ -500,7 -504,7 +500,7 @@@ const struct cpumask *cpu_coregroup_mas
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
 -              return &per_cpu(cpu_core_map, cpu);
 +              return cpu_core_mask(cpu);
        else
                return &c->llc_shared_map;
  }
@@@ -519,7 -523,7 +519,7 @@@ static void impress_friends(void
         */
        pr_debug("Before bogomips.\n");
        for_each_possible_cpu(cpu)
 -              if (cpu_isset(cpu, cpu_callout_map))
 +              if (cpumask_test_cpu(cpu, cpu_callout_mask))
                        bogosum += cpu_data(cpu).loops_per_jiffy;
        printk(KERN_INFO
                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
@@@ -900,19 -904,19 +900,19 @@@ do_rest
                 * allow APs to start initializing.
                 */
                pr_debug("Before Callout %d.\n", cpu);
 -              cpu_set(cpu, cpu_callout_map);
 +              cpumask_set_cpu(cpu, cpu_callout_mask);
                pr_debug("After Callout %d.\n", cpu);
  
                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
 -                      if (cpu_isset(cpu, cpu_callin_map))
 +                      if (cpumask_test_cpu(cpu, cpu_callin_mask))
                                break;  /* It has booted */
                        udelay(100);
                }
  
 -              if (cpu_isset(cpu, cpu_callin_map)) {
 +              if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        pr_debug("OK.\n");
                        printk(KERN_INFO "CPU%d: ", cpu);
@@@ -937,14 -941,9 +937,14 @@@ restore_state
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                numa_remove_cpu(cpu); /* was set by numa_add_cpu */
 -              cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
 -              cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
 -              cpu_clear(cpu, cpu_present_map);
 +
 +              /* was set by do_boot_cpu() */
 +              cpumask_clear_cpu(cpu, cpu_callout_mask);
 +
 +              /* was set by cpu_init() */
 +              cpumask_clear_cpu(cpu, cpu_initialized_mask);
 +
 +              set_cpu_present(cpu, false);
                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
        }
  
@@@ -978,7 -977,7 +978,7 @@@ int __cpuinit native_cpu_up(unsigned in
        /*
         * Already booted CPU?
         */
 -      if (cpu_isset(cpu, cpu_callin_map)) {
 +      if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
                pr_debug("do_boot_cpu %d Already started\n", cpu);
                return -ENOSYS;
        }
   */
  static __init void disable_smp(void)
  {
 -      cpu_present_map = cpumask_of_cpu(0);
 -      cpu_possible_map = cpumask_of_cpu(0);
 +      /* use the read/write pointers to the present and possible maps */
 +      cpumask_copy(&cpu_present_map, cpumask_of(0));
 +      cpumask_copy(&cpu_possible_map, cpumask_of(0));
        smpboot_clear_io_apic_irqs();
  
        if (smp_found_config)
        else
                physid_set_mask_of_physid(0, &phys_cpu_present_map);
        map_cpu_to_logical_apicid();
 -      cpu_set(0, per_cpu(cpu_sibling_map, 0));
 -      cpu_set(0, per_cpu(cpu_core_map, 0));
 +      cpumask_set_cpu(0, cpu_sibling_mask(0));
 +      cpumask_set_cpu(0, cpu_core_mask(0));
  }
  
  /*
@@@ -1066,14 -1064,14 +1066,14 @@@ static int __init smp_sanity_check(unsi
                nr = 0;
                for_each_present_cpu(cpu) {
                        if (nr >= 8)
 -                              cpu_clear(cpu, cpu_present_map);
 +                              set_cpu_present(cpu, false);
                        nr++;
                }
  
                nr = 0;
                for_each_possible_cpu(cpu) {
                        if (nr >= 8)
 -                              cpu_clear(cpu, cpu_possible_map);
 +                              set_cpu_possible(cpu, false);
                        nr++;
                }
  
@@@ -1169,7 -1167,7 +1169,7 @@@ void __init native_smp_prepare_cpus(uns
        preempt_disable();
        smp_cpu_index_default();
        current_cpu_data = boot_cpu_data;
 -      cpu_callin_map = cpumask_of_cpu(0);
 +      cpumask_copy(cpu_callin_mask, cpumask_of(0));
        mb();
        /*
         * Setup boot CPU information
@@@ -1244,8 -1242,8 +1244,8 @@@ void __init native_smp_prepare_boot_cpu
        init_gdt(me);
  #endif
        switch_to_new_gdt();
 -      /* already set me in cpu_online_map in boot_cpu_init() */
 -      cpu_set(me, cpu_callout_map);
 +      /* already set me in cpu_online_mask in boot_cpu_init() */
 +      cpumask_set_cpu(me, cpu_callout_mask);
        per_cpu(cpu_state, me) = CPU_ONLINE;
  }
  
@@@ -1313,7 -1311,7 +1313,7 @@@ __init void prefill_possible_map(void
                possible, max_t(int, possible - num_processors, 0));
  
        for (i = 0; i < possible; i++)
 -              cpu_set(i, cpu_possible_map);
 +              set_cpu_possible(i, true);
  
        nr_cpu_ids = possible;
  }
@@@ -1325,31 -1323,31 +1325,31 @@@ static void remove_siblinginfo(int cpu
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
  
 -      for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
 -              cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 +      for_each_cpu(sibling, cpu_core_mask(cpu)) {
 +              cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
                /*/
                 * last thread sibling in this cpu core going down
                 */
 -              if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
 +              if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }
  
 -      for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
 -              cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
 -      cpus_clear(per_cpu(cpu_sibling_map, cpu));
 -      cpus_clear(per_cpu(cpu_core_map, cpu));
 +      for_each_cpu(sibling, cpu_sibling_mask(cpu))
 +              cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
 +      cpumask_clear(cpu_sibling_mask(cpu));
 +      cpumask_clear(cpu_core_mask(cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
 -      cpu_clear(cpu, cpu_sibling_setup_map);
 +      cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
  }
  
  static void __ref remove_cpu_from_maps(int cpu)
  {
 -      cpu_clear(cpu, cpu_online_map);
 -      cpu_clear(cpu, cpu_callout_map);
 -      cpu_clear(cpu, cpu_callin_map);
 +      set_cpu_online(cpu, false);
 +      cpumask_clear_cpu(cpu, cpu_callout_mask);
 +      cpumask_clear_cpu(cpu, cpu_callin_mask);
        /* was set by cpu_init() */
 -      cpu_clear(cpu, cpu_initialized);
 +      cpumask_clear_cpu(cpu, cpu_initialized_mask);
        numa_remove_cpu(cpu);
  }
  
diff --combined arch/x86/kernel/traps.c
index c9a666cdd3db928278679f606c71a8006833065b,25d5c30735820873acddb33216105563ad19dc3b..98c2d055284b32fb2ce6ff6776b9cb049c103ae0
@@@ -20,6 -20,7 +20,6 @@@
  #include <linux/module.h>
  #include <linux/ptrace.h>
  #include <linux/string.h>
 -#include <linux/unwind.h>
  #include <linux/delay.h>
  #include <linux/errno.h>
  #include <linux/kexec.h>
@@@ -50,6 -51,7 +50,6 @@@
  #include <asm/debugreg.h>
  #include <asm/atomic.h>
  #include <asm/system.h>
 -#include <asm/unwind.h>
  #include <asm/traps.h>
  #include <asm/desc.h>
  #include <asm/i387.h>
@@@ -63,9 -65,6 +63,6 @@@
  #else
  #include <asm/processor-flags.h>
  #include <asm/arch_hooks.h>
- #include <asm/nmi.h>
- #include <asm/smp.h>
- #include <asm/io.h>
  #include <asm/traps.h>
  
  #include "cpu/mcheck/mce.h"
diff --combined lib/Kconfig.debug
index d0a32aab03ff66cbda68d3d60b30980d006d86ba,2d0f144901749d615cfd4e09e1e89cdecbf52a20..4c9ae6085c75c4a483bd86e43fcafead3f255450
@@@ -512,13 -512,6 +512,13 @@@ config DEBUG_VIRTUA
  
          If unsure, say N.
  
 +config DEBUG_NOMMU_REGIONS
 +      bool "Debug the global anon/private NOMMU mapping region tree"
 +      depends on DEBUG_KERNEL && !MMU
 +      help
 +        This option causes the global tree of anonymous and private mapping
 +        regions to be regularly checked for invalid topology.
 +
  config DEBUG_WRITECOUNT
        bool "Debug filesystem writers count"
        depends on DEBUG_KERNEL
@@@ -573,14 -566,14 +573,14 @@@ config DEBUG_NOTIFIER
  config FRAME_POINTER
        bool "Compile the kernel with frame pointers"
        depends on DEBUG_KERNEL && \
-               (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
-                AVR32 || SUPERH || BLACKFIN || MN10300)
-       default y if DEBUG_INFO && UML
-       help
-         If you say Y here the resulting kernel image will be slightly larger
-         and slower, but it might give very useful debugging information on
-         some architectures or if you use external debuggers.
-         If you don't debug the kernel, you can say N.
+               (CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
+                AVR32 || SUPERH || BLACKFIN || MN10300) || \
+               ARCH_WANT_FRAME_POINTERS
+       default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
+       help
+         If you say Y here the resulting kernel image will be slightly
+         larger and slower, but it gives very useful debugging information
+         in case of kernel bugs. (precise oopses/stacktraces/warnings)
  
  config BOOT_PRINTK_DELAY
        bool "Delay each boot printk message by N milliseconds"