Merge branch 'linus' into x86/cleanups
author Ingo Molnar <mingo@elte.hu>
Sat, 10 Jan 2009 22:56:42 +0000 (23:56 +0100)
committer Ingo Molnar <mingo@elte.hu>
Sat, 10 Jan 2009 22:56:42 +0000 (23:56 +0100)
arch/x86/include/asm/smp.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/smpboot.c
arch/x86/mm/init_32.c

diff --combined arch/x86/include/asm/smp.h
index 74ad9ef6ae023422cbbde2b86e3aad27e5adfb91,19953df61c52005cdd08772da175c9d389a8a10d..1963e27673c96f53bad7e3f65fa06f365a029973
  #include <asm/pda.h>
  #include <asm/thread_info.h>
  
+ #ifdef CONFIG_X86_64
+ extern cpumask_var_t cpu_callin_mask;
+ extern cpumask_var_t cpu_callout_mask;
+ extern cpumask_var_t cpu_initialized_mask;
+ extern cpumask_var_t cpu_sibling_setup_mask;
+ #else /* CONFIG_X86_32 */
+ extern cpumask_t cpu_callin_map;
  extern cpumask_t cpu_callout_map;
  extern cpumask_t cpu_initialized;
- extern cpumask_t cpu_callin_map;
+ extern cpumask_t cpu_sibling_setup_map;
+ #define cpu_callin_mask               ((struct cpumask *)&cpu_callin_map)
+ #define cpu_callout_mask      ((struct cpumask *)&cpu_callout_map)
+ #define cpu_initialized_mask  ((struct cpumask *)&cpu_initialized)
+ #define cpu_sibling_setup_mask        ((struct cpumask *)&cpu_sibling_setup_map)
+ #endif /* CONFIG_X86_32 */
  
 -extern void (*mtrr_hook)(void);
 -extern void zap_low_mappings(void);
 -
  extern int __cpuinit get_local_pda(int cpu);
  
  extern int smp_num_siblings;
  extern unsigned int num_processors;
- extern cpumask_t cpu_initialized;
  
  DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
  DECLARE_PER_CPU(cpumask_t, cpu_core_map);
@@@ -35,6 -54,16 +51,16 @@@ DECLARE_PER_CPU(u16, cpu_llc_id)
  DECLARE_PER_CPU(int, cpu_number);
  #endif
  
+ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ {
+       return &per_cpu(cpu_sibling_map, cpu);
+ }
+ static inline struct cpumask *cpu_core_mask(int cpu)
+ {
+       return &per_cpu(cpu_core_map, cpu);
+ }
  DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
  DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
  
@@@ -138,14 -167,20 +164,14 @@@ void play_dead_common(void)
  void native_send_call_func_ipi(const struct cpumask *mask);
  void native_send_call_func_single_ipi(int cpu);
  
 -extern void prefill_possible_map(void);
 -
  void smp_store_cpu_info(int id);
  #define cpu_physical_id(cpu)  per_cpu(x86_cpu_to_apicid, cpu)
  
  /* We don't mark CPUs online until __cpu_up(), so we need another measure */
  static inline int num_booting_cpus(void)
  {
-       return cpus_weight(cpu_callout_map);
+       return cpumask_weight(cpu_callout_mask);
  }
 -#else
 -static inline void prefill_possible_map(void)
 -{
 -}
  #endif /* CONFIG_SMP */
  
  extern unsigned disabled_cpus __cpuinitdata;
@@@ -170,6 -205,10 +196,6 @@@ extern int safe_smp_processor_id(void)
  })
  #define safe_smp_processor_id()               smp_processor_id()
  
 -#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
 -#define cpu_physical_id(cpu)          boot_cpu_physical_apicid
 -#define safe_smp_processor_id()               0
 -#define stack_smp_processor_id()      0
  #endif
  
  #ifdef CONFIG_X86_LOCAL_APIC
@@@ -212,5 -251,11 +238,5 @@@ static inline int hard_smp_processor_id
  
  #endif /* CONFIG_X86_LOCAL_APIC */
  
 -#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
 -extern unsigned char boot_cpu_id;
 -#else
 -#define boot_cpu_id   0
 -#endif
 -
  #endif /* __ASSEMBLY__ */
  #endif /* _ASM_X86_SMP_H */
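
The smp.h change above is the core of this cleanup: on 64-bit the boot-time CPU masks become cpumask_var_t, which has to be allocated (the common.c hunk below notes they are set up in setup_cpu_local_masks()), while 32-bit keeps the old fixed cpumask_t storage and exposes it through the same *_mask names via casts. A minimal standalone sketch of that dual-definition pattern — illustrative names and a simplified allocator, not the kernel's real <linux/cpumask.h> implementation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 64
struct cpumask { unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))]; };

#ifdef MASK_OFFSTACK                        /* plays the CONFIG_X86_64 role */
typedef struct cpumask *cpumask_var_t;      /* a pointer: must be allocated */
static int alloc_cpumask_var(cpumask_var_t *m)
{
	*m = calloc(1, sizeof(struct cpumask));
	return *m != NULL;
}
static void free_cpumask_var(cpumask_var_t m) { free(m); }
#else                                       /* plays the CONFIG_X86_32 role */
typedef struct cpumask cpumask_var_t[1];    /* fixed storage, no allocation */
static int alloc_cpumask_var(cpumask_var_t *m)
{
	memset(*m, 0, sizeof(**m));
	return 1;
}
static void free_cpumask_var(cpumask_var_t m) { (void)m; }
#endif

int main(void)
{
	cpumask_var_t callin;

	if (!alloc_cpumask_var(&callin))
		return 1;
	/* either way, callers hold something usable as a struct cpumask * */
	printf("mask size: %zu bytes\n", sizeof(struct cpumask));
	free_cpumask_var(callin);
	return 0;
}

Either way the call sites only ever see a struct cpumask *, which is what lets the 64-bit and 32-bit paths share the cpumask_*() API throughout the rest of this diff. (The kernel's real alloc_cpumask_var() also takes gfp flags.)
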
diff --combined arch/x86/kernel/cpu/common.c
index f7619a2eaffe86f708f4dca58fd27f7d0cf6a297,83492b1f93b11c5e0b851300ffdb3e314eaacc9e..14e543b6fd4f813eb35fe00e48cb80c8c44608e0
@@@ -21,7 -21,6 +21,7 @@@
  #include <asm/asm.h>
  #include <asm/numa.h>
  #include <asm/smp.h>
 +#include <asm/cpu.h>
  #ifdef CONFIG_X86_LOCAL_APIC
  #include <asm/mpspec.h>
  #include <asm/apic.h>
  
  #include "cpu.h"
  
+ #ifdef CONFIG_X86_64
+ /* all of these masks are initialized in setup_cpu_local_masks() */
+ cpumask_var_t cpu_callin_mask;
+ cpumask_var_t cpu_callout_mask;
+ cpumask_var_t cpu_initialized_mask;
+ /* representing cpus for which sibling maps can be computed */
+ cpumask_var_t cpu_sibling_setup_mask;
+ #else /* CONFIG_X86_32 */
+ cpumask_t cpu_callin_map;
+ cpumask_t cpu_callout_map;
+ cpumask_t cpu_initialized;
+ cpumask_t cpu_sibling_setup_map;
+ #endif /* CONFIG_X86_32 */
  static struct cpu_dev *this_cpu __cpuinitdata;
  
  #ifdef CONFIG_X86_64
@@@ -857,8 -876,6 +877,6 @@@ static __init int setup_disablecpuid(ch
  }
  __setup("clearcpuid=", setup_disablecpuid);
  
- cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
  #ifdef CONFIG_X86_64
  struct x8664_pda **_cpu_pda __read_mostly;
  EXPORT_SYMBOL(_cpu_pda);
@@@ -977,7 -994,7 +995,7 @@@ void __cpuinit cpu_init(void
  
        me = current;
  
-       if (cpu_test_and_set(cpu, cpu_initialized))
+       if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
                panic("CPU#%d already initialized!\n", cpu);
  
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@@ -1086,7 -1103,7 +1104,7 @@@ void __cpuinit cpu_init(void
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &curr->thread;
  
-       if (cpu_test_and_set(cpu, cpu_initialized)) {
+       if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }
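
Both cpu_init() hunks above replace cpu_test_and_set() with cpumask_test_and_set_cpu(), preserving the double-initialization guard: the first caller to set its bit proceeds, a repeat offender panics (64-bit) or spins (32-bit). A reduced userspace sketch of that guard idiom, with C11 atomics standing in for the kernel's atomic bitmap ops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong initialized_bits;

/* returns the bit's previous value, like cpumask_test_and_set_cpu() */
static int test_and_set_cpu(int cpu)
{
	unsigned long mask = 1UL << cpu;

	return (atomic_fetch_or(&initialized_bits, mask) & mask) != 0;
}

int main(void)
{
	if (test_and_set_cpu(3))
		printf("CPU#3 already initialized!\n");  /* cpu_init() would panic */
	else
		printf("Initializing CPU#3\n");

	if (test_and_set_cpu(3))                         /* second call trips the guard */
		printf("CPU#3 already initialized!\n");
	return 0;
}
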
diff --combined arch/x86/kernel/smpboot.c
index f8c885bed18c9cc0a1772ad285389f6d5b3f124f,bb1a3b1fc87faeb43baa876f725a3454673abcdc..6c2b8444b8307b24d0a2e6966eacdf3869077b82
@@@ -53,6 -53,7 +53,6 @@@
  #include <asm/nmi.h>
  #include <asm/irq.h>
  #include <asm/idle.h>
 -#include <asm/smp.h>
  #include <asm/trampoline.h>
  #include <asm/cpu.h>
  #include <asm/numa.h>
@@@ -101,9 -102,6 +101,6 @@@ EXPORT_SYMBOL(smp_num_siblings)
  /* Last level cache ID of each logical CPU */
  DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
  
- cpumask_t cpu_callin_map;
- cpumask_t cpu_callout_map;
  /* representing HT siblings of each logical CPU */
  DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
  EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@@ -119,9 -117,6 +116,6 @@@ EXPORT_PER_CPU_SYMBOL(cpu_info)
  static atomic_t init_deasserted;
  
  
- /* representing cpus for which sibling maps can be computed */
- static cpumask_t cpu_sibling_setup_map;
  /* Set if we find a B stepping CPU */
  static int __cpuinitdata smp_b_stepping;
  
@@@ -139,7 -134,7 +133,7 @@@ EXPORT_SYMBOL(cpu_to_node_map)
  static void map_cpu_to_node(int cpu, int node)
  {
        printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-       cpu_set(cpu, node_to_cpumask_map[node]);
+       cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = node;
  }
  
@@@ -150,7 -145,7 +144,7 @@@ static void unmap_cpu_to_node(int cpu
  
        printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
-               cpu_clear(cpu, node_to_cpumask_map[node]);
+               cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
        cpu_to_node_map[cpu] = 0;
  }
  #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@@ -208,7 -203,7 +202,7 @@@ static void __cpuinit smp_callin(void
         */
        phys_id = read_apic_id();
        cpuid = smp_processor_id();
-       if (cpu_isset(cpuid, cpu_callin_map)) {
+       if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
                panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
                                        phys_id, cpuid);
        }
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
-               if (cpu_isset(cpuid, cpu_callout_map))
+               if (cpumask_test_cpu(cpuid, cpu_callout_mask))
                        break;
                cpu_relax();
        }
        /*
         * Allow the master to continue.
         */
-       cpu_set(cpuid, cpu_callin_map);
+       cpumask_set_cpu(cpuid, cpu_callin_mask);
  }
  
  static int __cpuinitdata unsafe_smp;
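
The smp_callin() hunks above keep the boot handshake intact while switching APIs: do_boot_cpu() sets the AP's bit in cpu_callout_mask and waits up to 5s; the AP spins until it sees its callout bit, then acknowledges through cpu_callin_mask. A thread-based sketch of that handshake — the names mirror the kernel's, but this is an illustration, not kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong cpu_callout_bits, cpu_callin_bits;

static void *ap_thread(void *arg)
{
	int cpu = *(int *)arg;

	/* "Has the boot CPU finished its STARTUP sequence?" */
	while (!(atomic_load(&cpu_callout_bits) & (1UL << cpu)))
		;
	printf("CPU#%d: called out, calling in\n", cpu);
	/* allow the master to continue */
	atomic_fetch_or(&cpu_callin_bits, 1UL << cpu);
	return NULL;
}

int main(void)
{
	int cpu = 1;
	pthread_t ap;

	pthread_create(&ap, NULL, ap_thread, &cpu);
	atomic_fetch_or(&cpu_callout_bits, 1UL << cpu);  /* do_boot_cpu(): callout */
	while (!(atomic_load(&cpu_callin_bits) & (1UL << cpu)))
		;                                        /* wait for the AP's ack */
	printf("CPU#%d has booted\n", cpu);
	pthread_join(ap, NULL);
	return 0;
}
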
@@@ -331,7 -326,7 +325,7 @@@ notrace static void __cpuinit start_sec
        ipi_call_lock();
        lock_vector_lock();
        __setup_vector_irq(smp_processor_id());
-       cpu_set(smp_processor_id(), cpu_online_map);
+       set_cpu_online(smp_processor_id(), true);
        unlock_vector_lock();
        ipi_call_unlock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@@ -437,50 -432,52 +431,52 @@@ void __cpuinit set_cpu_sibling_map(int 
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
  
-       cpu_set(cpu, cpu_sibling_setup_map);
+       cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
  
        if (smp_num_siblings > 1) {
-               for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
-                       if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-                           c->cpu_core_id == cpu_data(i).cpu_core_id) {
-                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-                               cpu_set(i, per_cpu(cpu_core_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_core_map, i));
-                               cpu_set(i, c->llc_shared_map);
-                               cpu_set(cpu, cpu_data(i).llc_shared_map);
+               for_each_cpu(i, cpu_sibling_setup_mask) {
+                       struct cpuinfo_x86 *o = &cpu_data(i);
+                       if (c->phys_proc_id == o->phys_proc_id &&
+                           c->cpu_core_id == o->cpu_core_id) {
+                               cpumask_set_cpu(i, cpu_sibling_mask(cpu));
+                               cpumask_set_cpu(cpu, cpu_sibling_mask(i));
+                               cpumask_set_cpu(i, cpu_core_mask(cpu));
+                               cpumask_set_cpu(cpu, cpu_core_mask(i));
+                               cpumask_set_cpu(i, &c->llc_shared_map);
+                               cpumask_set_cpu(cpu, &o->llc_shared_map);
                        }
                }
        } else {
-               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+               cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
        }
  
-       cpu_set(cpu, c->llc_shared_map);
+       cpumask_set_cpu(cpu, &c->llc_shared_map);
  
        if (current_cpu_data.x86_max_cores == 1) {
-               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+               cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
                c->booted_cores = 1;
                return;
        }
  
-       for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
+       for_each_cpu(i, cpu_sibling_setup_mask) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                       cpu_set(i, c->llc_shared_map);
-                       cpu_set(cpu, cpu_data(i).llc_shared_map);
+                       cpumask_set_cpu(i, &c->llc_shared_map);
+                       cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-                       cpu_set(i, per_cpu(cpu_core_map, cpu));
-                       cpu_set(cpu, per_cpu(cpu_core_map, i));
+                       cpumask_set_cpu(i, cpu_core_mask(cpu));
+                       cpumask_set_cpu(cpu, cpu_core_mask(i));
                        /*
                         *  Does this new cpu bring up a new core?
                         */
-                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+                       if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
-                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+                               if (cpumask_first(cpu_sibling_mask(i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
@@@ -503,7 -500,7 +499,7 @@@ const struct cpumask *cpu_coregroup_mas
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
-               return &per_cpu(cpu_core_map, cpu);
+               return cpu_core_mask(cpu);
        else
                return &c->llc_shared_map;
  }
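
set_cpu_sibling_map() above is the busiest conversion: for_each_cpu_mask_nr() over a cpumask_t value becomes for_each_cpu() over a struct cpumask *, obtained through the new cpu_sibling_mask()/cpu_core_mask() accessors declared in smp.h. A compressed sketch of that iteration idiom, with a simplified single-word mask and hand-rolled stand-ins for the kernel helpers:

#include <stdio.h>

#define NR_CPUS 16
struct cpumask { unsigned long bits; };

static int next_cpu(const struct cpumask *m, int prev)
{
	for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (m->bits & (1UL << cpu))
			return cpu;
	return NR_CPUS;
}

#define for_each_cpu(cpu, mask)                             \
	for ((cpu) = next_cpu((mask), -1); (cpu) < NR_CPUS; \
	     (cpu) = next_cpu((mask), (cpu)))

static struct cpumask sibling_map[NR_CPUS];
static struct cpumask *cpu_sibling_mask(int cpu) { return &sibling_map[cpu]; }

int main(void)
{
	int cpu;

	/* pretend CPUs 2 and 6 are HT siblings */
	cpu_sibling_mask(2)->bits = (1UL << 2) | (1UL << 6);
	for_each_cpu(cpu, cpu_sibling_mask(2))
		printf("cpu 2 sibling: %d\n", cpu);
	return 0;
}

Because the accessors return pointers, the same loop body works whether the underlying map is a per-cpu cpumask_t, an off-stack allocation, or (as with cpu_callout_mask above) a cast through a compatibility macro.
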
@@@ -522,7 -519,7 +518,7 @@@ static void impress_friends(void
         */
        pr_debug("Before bogomips.\n");
        for_each_possible_cpu(cpu)
-               if (cpu_isset(cpu, cpu_callout_map))
+               if (cpumask_test_cpu(cpu, cpu_callout_mask))
                        bogosum += cpu_data(cpu).loops_per_jiffy;
        printk(KERN_INFO
                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
@@@ -903,19 -900,19 +899,19 @@@ do_rest
                 * allow APs to start initializing.
                 */
                pr_debug("Before Callout %d.\n", cpu);
-               cpu_set(cpu, cpu_callout_map);
+               cpumask_set_cpu(cpu, cpu_callout_mask);
                pr_debug("After Callout %d.\n", cpu);
  
                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
-                       if (cpu_isset(cpu, cpu_callin_map))
+                       if (cpumask_test_cpu(cpu, cpu_callin_mask))
                                break;  /* It has booted */
                        udelay(100);
                }
  
-               if (cpu_isset(cpu, cpu_callin_map)) {
+               if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        pr_debug("OK.\n");
                        printk(KERN_INFO "CPU%d: ", cpu);
@@@ -940,9 -937,14 +936,14 @@@ restore_state
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-               cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
-               cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-               cpu_clear(cpu, cpu_present_map);
+               /* was set by do_boot_cpu() */
+               cpumask_clear_cpu(cpu, cpu_callout_mask);
+               /* was set by cpu_init() */
+               cpumask_clear_cpu(cpu, cpu_initialized_mask);
+               set_cpu_present(cpu, false);
                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
        }
  
@@@ -976,7 -978,7 +977,7 @@@ int __cpuinit native_cpu_up(unsigned in
        /*
         * Already booted CPU?
         */
-       if (cpu_isset(cpu, cpu_callin_map)) {
+       if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
                pr_debug("do_boot_cpu %d Already started\n", cpu);
                return -ENOSYS;
        }
   */
  static __init void disable_smp(void)
  {
-       cpu_present_map = cpumask_of_cpu(0);
-       cpu_possible_map = cpumask_of_cpu(0);
+       /* use the read/write pointers to the present and possible maps */
+       cpumask_copy(&cpu_present_map, cpumask_of(0));
+       cpumask_copy(&cpu_possible_map, cpumask_of(0));
        smpboot_clear_io_apic_irqs();
  
        if (smp_found_config)
        else
                physid_set_mask_of_physid(0, &phys_cpu_present_map);
        map_cpu_to_logical_apicid();
-       cpu_set(0, per_cpu(cpu_sibling_map, 0));
-       cpu_set(0, per_cpu(cpu_core_map, 0));
+       cpumask_set_cpu(0, cpu_sibling_mask(0));
+       cpumask_set_cpu(0, cpu_core_mask(0));
  }
  
  /*
@@@ -1063,14 -1066,14 +1065,14 @@@ static int __init smp_sanity_check(unsi
                nr = 0;
                for_each_present_cpu(cpu) {
                        if (nr >= 8)
-                               cpu_clear(cpu, cpu_present_map);
+                               set_cpu_present(cpu, false);
                        nr++;
                }
  
                nr = 0;
                for_each_possible_cpu(cpu) {
                        if (nr >= 8)
-                               cpu_clear(cpu, cpu_possible_map);
+                               set_cpu_possible(cpu, false);
                        nr++;
                }
  
@@@ -1166,7 -1169,7 +1168,7 @@@ void __init native_smp_prepare_cpus(uns
        preempt_disable();
        smp_cpu_index_default();
        current_cpu_data = boot_cpu_data;
-       cpu_callin_map = cpumask_of_cpu(0);
+       cpumask_copy(cpu_callin_mask, cpumask_of(0));
        mb();
        /*
         * Setup boot CPU information
@@@ -1241,8 -1244,8 +1243,8 @@@ void __init native_smp_prepare_boot_cpu
        init_gdt(me);
  #endif
        switch_to_new_gdt();
-       /* already set me in cpu_online_map in boot_cpu_init() */
-       cpu_set(me, cpu_callout_map);
+       /* already set me in cpu_online_mask in boot_cpu_init() */
+       cpumask_set_cpu(me, cpu_callout_mask);
        per_cpu(cpu_state, me) = CPU_ONLINE;
  }
  
@@@ -1310,7 -1313,7 +1312,7 @@@ __init void prefill_possible_map(void
                possible, max_t(int, possible - num_processors, 0));
  
        for (i = 0; i < possible; i++)
-               cpu_set(i, cpu_possible_map);
+               set_cpu_possible(i, true);
  
        nr_cpu_ids = possible;
  }
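
prefill_possible_map() now goes through set_cpu_possible() rather than assigning cpu_possible_map directly — the same accessor pattern start_secondary(), smp_sanity_check() and remove_cpu_from_maps() adopt for the online and present maps. A sketch of the idea behind the setter (an illustrative reduction, not the kernel's implementation): the writable storage stays private, and outside code gets only a read-side view plus a mutator.

#include <stdbool.h>
#include <stdio.h>

static unsigned long cpu_possible_bits;                       /* private storage */
const unsigned long *cpu_possible_mask = &cpu_possible_bits;  /* read-only view */

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpu_possible_bits |= 1UL << cpu;
	else
		cpu_possible_bits &= ~(1UL << cpu);
}

int main(void)
{
	/* as the loop in prefill_possible_map() does */
	for (unsigned int i = 0; i < 4; i++)
		set_cpu_possible(i, true);
	printf("possible mask: %#lx\n", *cpu_possible_mask);
	return 0;
}
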
@@@ -1322,31 -1325,31 +1324,31 @@@ static void remove_siblinginfo(int cpu
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
  
-       for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
-               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+       for_each_cpu(sibling, cpu_core_mask(cpu)) {
+               cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
-               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+               if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }
  
-       for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
-       cpus_clear(per_cpu(cpu_core_map, cpu));
+       for_each_cpu(sibling, cpu_sibling_mask(cpu))
+               cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+       cpumask_clear(cpu_sibling_mask(cpu));
+       cpumask_clear(cpu_core_mask(cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
-       cpu_clear(cpu, cpu_sibling_setup_map);
+       cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
  }
  
  static void __ref remove_cpu_from_maps(int cpu)
  {
-       cpu_clear(cpu, cpu_online_map);
-       cpu_clear(cpu, cpu_callout_map);
-       cpu_clear(cpu, cpu_callin_map);
+       set_cpu_online(cpu, false);
+       cpumask_clear_cpu(cpu, cpu_callout_mask);
+       cpumask_clear_cpu(cpu, cpu_callin_mask);
        /* was set by cpu_init() */
-       cpu_clear(cpu, cpu_initialized);
+       cpumask_clear_cpu(cpu, cpu_initialized_mask);
        numa_remove_cpu(cpu);
  }
  
diff --combined arch/x86/mm/init_32.c
index a9dd0b7ad61850b13cfd887c80292adb5413c756,88f1b10de3be712a33545b20292a45f7a1c43c50..4a6989e47a5307f336298f4d1ed0ce1e34391d54
@@@ -49,6 -49,7 +49,6 @@@
  #include <asm/paravirt.h>
  #include <asm/setup.h>
  #include <asm/cacheflush.h>
 -#include <asm/smp.h>
  
  unsigned int __VMALLOC_RESERVE = 128 << 20;
  
@@@ -327,6 -328,8 +327,8 @@@ int devmem_is_allowed(unsigned long pag
  {
        if (pagenr <= 256)
                return 1;
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+               return 0;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
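
The devmem_is_allowed() hunk comes in from the 'linus' side of the merge: an iomem_is_exclusive() check is inserted between the legacy low-1MB allowance and the RAM test, so /dev/mem can no longer map regions a driver has claimed exclusively. The resulting policy, as a standalone sketch with stand-in predicates (the page layout here is invented for the demo):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* stand-ins for the kernel predicates, illustrative only */
static bool page_is_ram(unsigned long pagenr)      { return pagenr >= 512; }
static bool iomem_is_exclusive(unsigned long addr) { return addr == (300UL << PAGE_SHIFT); }

static int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)            /* legacy low 1MB: always allowed */
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;             /* claimed exclusively by a driver */
	if (!page_is_ram(pagenr))
		return 1;             /* MMIO, holes: allowed */
	return 0;                     /* ordinary RAM: denied */
}

int main(void)
{
	unsigned long pages[] = { 100, 300, 400, 1024 };

	for (unsigned int i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
		printf("page %4lu -> %s\n", pages[i],
		       devmem_is_allowed(pages[i]) ? "allowed" : "denied");
	return 0;
}
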
@@@ -1078,7 -1081,7 +1080,7 @@@ int arch_add_memory(int nid, u64 start
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
  
-       return __add_pages(zone, start_pfn, nr_pages);
+       return __add_pages(nid, zone, start_pfn, nr_pages);
  }
  #endif