cpumask: convert struct cpuinfo_x86's llc_shared_map to cpumask_var_t
author    Rusty Russell <rusty@rustcorp.com.au>
          Fri, 13 Mar 2009 04:19:53 +0000 (14:49 +1030)
committer Rusty Russell <rusty@rustcorp.com.au>
          Fri, 13 Mar 2009 04:19:53 +0000 (14:49 +1030)
Impact: reduce kernel memory usage when CONFIG_CPUMASK_OFFSTACK=y

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
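
For context (not part of the patch): the memory saving comes from how cpumask_var_t is defined. A simplified sketch of the definitions in <linux/cpumask.h> around this kernel version:

/* Simplified sketch, for context only; not part of this patch. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/* The mask lives in separately allocated memory sized for nr_cpu_ids,
 * so embedding a cpumask_var_t in a struct costs only one pointer. */
typedef struct cpumask *cpumask_var_t;
#else
/* Without OFFSTACK, cpumask_var_t is a one-element array: it behaves
 * like an embedded cpumask and alloc_cpumask_var() allocates nothing. */
typedef struct cpumask cpumask_var_t[1];
#endif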
arch/x86/include/asm/processor.h
arch/x86/kernel/smpboot.c

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 76139506c3e4f489d6252edd272e98d0e8637ead..d794d9483c56e75a4199b1073b5953525bf97d88 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
        unsigned long           loops_per_jiffy;
 #ifdef CONFIG_SMP
        /* cpus sharing the last level cache: */
-       cpumask_t               llc_shared_map;
+       cpumask_var_t           llc_shared_map;
 #endif
        /* cpuid returned max cores value: */
        u16                      x86_max_cores;
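
This field change is where the saving in the Impact line comes from: the embedded mask scaled with NR_CPUS, while the new field is a single pointer plus one boot-time allocation sized for nr_cpu_ids. A hypothetical, illustration-only snippet (names and numbers are assumptions, not from the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>

/* Hypothetical demo, not part of the patch: with NR_CPUS=4096 an embedded
 * cpumask_t is 512 bytes per possible CPU, while the cpumask_var_t field
 * is a single pointer when CONFIG_CPUMASK_OFFSTACK=y. */
static int __init llc_mask_cost_demo(void)
{
	pr_info("cpumask_t: %zu bytes, cpumask_var_t field: %zu bytes\n",
		sizeof(cpumask_t), sizeof(cpumask_var_t));
	return 0;
}
late_initcall(llc_mask_cost_demo);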
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5a58a45ac1e3f94f7d69ece20f1e8f0eda051340..d6427aa569661a87d79f6094e410f52e2e0d80a4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -329,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused)
        cpu_idle();
 }
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* In this case, llc_shared_map is a pointer to a cpumask. */
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+                                   const struct cpuinfo_x86 *src)
+{
+       struct cpumask *llc = dst->llc_shared_map;
+       *dst = *src;
+       dst->llc_shared_map = llc;
+}
+#else
+static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
+                                   const struct cpuinfo_x86 *src)
+{
+       *dst = *src;
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -338,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id)
 {
        struct cpuinfo_x86 *c = &cpu_data(id);
 
-       *c = boot_cpu_data;
+       copy_cpuinfo_x86(c, &boot_cpu_data);
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
@@ -362,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                                cpumask_set_cpu(cpu, cpu_sibling_mask(i));
                                cpumask_set_cpu(i, cpu_core_mask(cpu));
                                cpumask_set_cpu(cpu, cpu_core_mask(i));
-                               cpumask_set_cpu(i, &c->llc_shared_map);
-                               cpumask_set_cpu(cpu, &o->llc_shared_map);
+                               cpumask_set_cpu(i, c->llc_shared_map);
+                               cpumask_set_cpu(cpu, o->llc_shared_map);
                        }
                }
        } else {
                cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
        }
 
-       cpumask_set_cpu(cpu, &c->llc_shared_map);
+       cpumask_set_cpu(cpu, c->llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
                cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -381,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
        for_each_cpu(i, cpu_sibling_setup_mask) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                       cpumask_set_cpu(i, &c->llc_shared_map);
-                       cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
+                       cpumask_set_cpu(i, c->llc_shared_map);
+                       cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -420,7 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
        if (sched_mc_power_savings || sched_smt_power_savings)
                return cpu_core_mask(cpu);
        else
-               return &c->llc_shared_map;
+               return c->llc_shared_map;
 }
 
 static void impress_friends(void)
@@ -1039,8 +1056,10 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        for_each_possible_cpu(i) {
                alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+               alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
                cpumask_clear(per_cpu(cpu_core_map, i));
                cpumask_clear(per_cpu(cpu_sibling_map, i));
+               cpumask_clear(cpu_data(i).llc_shared_map);
        }
        set_cpu_sibling_map(0);
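
A note on the pattern above (a sketch for context, not part of the patch): alloc_cpumask_var() only performs a real allocation when CONFIG_CPUMASK_OFFSTACK=y; otherwise it simply reports success and the mask stays embedded in the struct. The copy_cpuinfo_x86() helper exists because, in the OFFSTACK case, a plain structure assignment would overwrite the destination's allocated pointer with the boot CPU's, leaving two CPUs sharing one mask. A minimal standalone sketch of the same idiom, using hypothetical names:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical types and helpers, for illustration only. */
struct demo_info {
	cpumask_var_t shared_map;	/* a pointer when OFFSTACK=y */
	int id;
};

static int demo_init(struct demo_info *info, int id)
{
	/* Real kmalloc only with CONFIG_CPUMASK_OFFSTACK=y; otherwise this
	 * always succeeds without allocating anything. */
	if (!alloc_cpumask_var(&info->shared_map, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(info->shared_map);
	info->id = id;
	return 0;
}

#ifdef CONFIG_CPUMASK_OFFSTACK
static void demo_copy(struct demo_info *dst, const struct demo_info *src)
{
	/* Preserve dst's own allocation, as copy_cpuinfo_x86() does above:
	 * a bare "*dst = *src" would leave dst pointing at src's mask. */
	struct cpumask *saved = dst->shared_map;

	*dst = *src;
	dst->shared_map = saved;
}
#else
static void demo_copy(struct demo_info *dst, const struct demo_info *src)
{
	*dst = *src;	/* the mask is embedded, so a plain copy is safe */
}
#endif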