#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int max_physical_apicid;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
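
/*
 * Each DEFINE_EARLY_PER_CPU() above provides two backing stores: a
 * regular per-cpu variable plus a static NR_CPUS-sized array (reached
 * through a *_early_ptr) that is usable before the per-cpu areas
 * exist.  Sketch of the expansion (see asm/percpu.h for the real one):
 *
 *	DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
 *	u16 x86_cpu_to_apicid_early_map[NR_CPUS] __initdata;
 *	u16 *x86_cpu_to_apicid_early_ptr = x86_cpu_to_apicid_early_map;
 *
 * setup_per_cpu_maps() below migrates the early values into the
 * per-cpu copies and then NULLs the *_early_ptr's.
 */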

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define	X86_64_NUMA	1

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
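
/*
 * From here on, an early_per_cpu() read such as
 * early_per_cpu(x86_cpu_to_apicid, cpu) finds its *_early_ptr NULL and
 * falls through to the live per-cpu copy; the lookup is roughly:
 *
 *	early_per_cpu_ptr(x86_cpu_to_apicid) ?
 *		early_per_cpu_ptr(x86_cpu_to_apicid)[cpu] :
 *		per_cpu(x86_cpu_to_apicid, cpu)
 */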

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
/*
 * Replace static cpumask_of_cpu_map in the initdata section,
 * with one that's allocated sized by the possible number of cpus.
 *
 * (requires nr_cpu_ids to be initialized)
 */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif
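
/*
 * After the loop above, cpumask_of_cpu_map[i] is a mask with only bit
 * i set, so a cpumask_of_cpu(i) lookup can be served from this table,
 * which now occupies nr_cpu_ids entries of bootmem rather than the
 * NR_CPUS-sized initdata original.
 */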

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	struct x8664_pda **new_cpu_pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long tsize = nr_cpu_ids * sizeof(void *);
		unsigned long asize = size * (nr_cpu_ids - 1);

		tsize = roundup(tsize, cache_line_size());
		new_cpu_pda = alloc_bootmem(tsize + asize);
		pda = (char *)new_cpu_pda + tsize;
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			new_cpu_pda[0] = cpu_pda(0);
			continue;
		}
		new_cpu_pda[cpu] = (struct x8664_pda *)pda;
		new_cpu_pda[cpu]->in_bootmem = 1;
		pda += size;
	}

	/* point to new pointer table */
	_cpu_pda = new_cpu_pda;
}
#endif
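
/*
 * Resulting bootmem layout, for nr_cpu_ids == n (cpu 0 keeps its
 * statically allocated pda, so only n - 1 pda slots are carved out):
 *
 *	new_cpu_pda: [ptr 0][ptr 1] ... [ptr n-1]   tsize, cache aligned
 *	pda area:    [pda 1][pda 2] ... [pda n-1]   asize = size * (n - 1)
 */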

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size = PERCPU_ENOUGH_ROOM;
	char *ptr;
	int cpu;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(cpu);

		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
				cpu, node);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
		NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif
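
/*
 * From this point on, every possible cpu owns a private copy of the
 * .data.percpu section and per_cpu_offset(cpu) records where it
 * landed; a per_cpu(var, cpu) access resolves, roughly, to the static
 * percpu symbol's address shifted by per_cpu_offset(cpu).
 */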

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
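
/*
 * Note the map is sized by nr_node_ids (derived above from
 * node_possible_map) rather than by MAX_NUMNODES, so the bootmem cost
 * scales with the nodes actually present.
 */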

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		pr_debug("Setting node for non-present cpu %d\n", cpu);
}
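
/*
 * The cascade above mirrors boot progress: write the early static map
 * while it still exists, else the cpu's per-cpu copy once its area is
 * set up; a cpu with neither is not present yet, so only log it.
 */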

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), *mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *_node_to_cpumask_ptr(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(_node_to_cpumask_ptr);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);
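
/*
 * Hypothetical caller showing the preferred pointer form (the
 * node_to_cpumask_ptr() helper in asm/topology.h expands to a call to
 * _node_to_cpumask_ptr() above); do_something() is a made-up stand-in:
 *
 *	node_to_cpumask_ptr(mask, node);
 *	for_each_cpu_mask(cpu, *mask)
 *		do_something(cpu);
 */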

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */