/*
 * X86-64 specific CPU setup.
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
 * See setup.c for older changelog.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#ifndef CONFIG_DEBUG_BOOT_PARAMS
struct boot_params __initdata boot_params;
#else
struct boot_params boot_params;
#endif
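
/*
 * Without CONFIG_DEBUG_BOOT_PARAMS the copy above is __initdata and is
 * discarded after boot; with it, boot_params stays around so it can be
 * inspected later (e.g. via debugfs).
 */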

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(_cpu_pda);
struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
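
/*
 * The IDT descriptor: x86-64 interrupt gates are 16 bytes each, and
 * descriptor limits are "size - 1", hence 256 * 16 - 1 for all 256 entries.
 */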
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

static int do_not_nx __cpuinitdata = 0;

/* noexec=on|off
Control non executable mappings for 64bit processes.

on	Enable (default)
off	Disable
*/
static int __init nonx_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
early_param("noexec", nonx_setup);
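
/*
 * Example: booting with "noexec=off" makes nonx_setup() clear _PAGE_NX
 * from __supported_pte_mask and set do_not_nx, so check_efer() below
 * leaves NX disabled even on NX-capable hardware.
 */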

int force_personality32 = 0;

/* noexec32=on|off
Control non executable heap for 32bit processes.
To control the stack too use noexec=off

on	PROT_READ does not imply PROT_EXEC for 32bit processes (default)
off	PROT_READ implies PROT_EXEC
*/
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
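
/*
 * Consumed when a 32bit binary is exec'ed: the ia32 binfmt code ORs
 * force_personality32 into the new task's personality, where
 * READ_IMPLIES_EXEC makes every PROT_READ mapping executable as well.
 */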

/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas. These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
		if (per_cpu_offset(cpu)) {
#endif
			per_cpu(x86_cpu_to_apicid, cpu) =
						x86_cpu_to_apicid_init[cpu];
			per_cpu(x86_bios_cpu_apicid, cpu) =
						x86_bios_cpu_apicid_init[cpu];
#ifdef CONFIG_NUMA
			per_cpu(x86_cpu_to_node_map, cpu) =
						x86_cpu_to_node_map_init[cpu];
#endif
#ifdef CONFIG_SMP
		} else
			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
			       cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	x86_cpu_to_apicid_early_ptr = NULL;
	x86_bios_cpu_apicid_early_ptr = NULL;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = NULL;
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;

	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
	for_each_cpu_mask(i, cpu_possible_map) {
		char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(i);

		if (!node_online(node) || !NODE_DATA(node))
			ptr = alloc_bootmem_pages(size);
		else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	/* setup percpu data maps early */
	setup_per_cpu_maps();
}
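
/*
 * From here on per_cpu(var, cpu) works: it adds cpu_pda(cpu)->data_offset
 * (set above) to the address of var's original .data.percpu copy to reach
 * that CPU's private instance.
 */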

void pda_init(int cpu)
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Set up data that may be needed in __get_free_pages early */
	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
	/* Memory clobbers used to order PDA accesses */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack =
		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
	} else {
		pda->irqstackptr = (char *)
			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
		if (!pda->irqstackptr)
			panic("cannot allocate irqstack for cpu %d", cpu);
	}

	pda->irqstackptr += IRQSTACKSIZE - 64;
}
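
/*
 * With MSR_GS_BASE pointing at the PDA, the pda accessors compile down to
 * single %gs-relative instructions, e.g. read_pda(cpunumber) is roughly
 *	movl %gs:<offsetof(struct x8664_pda, cpunumber)>, %eax
 */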

char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));
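
/*
 * The asymmetric sizing above reflects that the debug stack (DEBUG_STKSZ)
 * is larger than the remaining N_EXCEPTION_STACKS - 1 exception stacks of
 * EXCEPTION_STKSZ bytes each.
 */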

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register. STAR allows setting
	 * CS/SS, but only a 32bit target; LSTAR sets the 64bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);
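
	/*
	 * CSTAR is the SYSCALL entry point for compat (32bit) mode;
	 * ignore_sysret simply returns -ENOSYS. With IA32 emulation enabled,
	 * syscall32_cpu_init() below re-points CSTAR at the real compat entry.
	 */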

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}
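
/*
 * On SYSCALL the CPU saves RFLAGS in %r11 and the return rip in %rcx, then
 * clears whatever MSR_SYSCALL_MASK selects; masking TF, DF, IF and IOPL
 * gives the entry code a clean, interrupts-off flags state.
 */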

void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx) {
		__supported_pte_mask &= ~_PAGE_NX;
	}
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 * A lot of state is already set up in PDA init.
 */
void __cpuinit cpu_init(void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0)
		pda_init(cpu);
	else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	if (cpu)
		memcpy(get_cpu_gdt_table(cpu), cpu_gdt_table, GDT_SIZE);

	cpu_gdt_descr[cpu].size = GDT_SIZE;
	load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();

	/*
	 * set up and load the per-CPU TSS
	 */
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		if (cpu) {
			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
			if (!estacks)
				panic("Cannot allocate exception stack %ld %d\n",
				      v, cpu);
		}
		estacks += PAGE_SIZE << order[v];
		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;
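
	/*
	 * Note: all bits set means every port access traps. ioperm() later
	 * grants access by clearing bits in a task's private copy of the
	 * bitmap, which is copied into this TSS when the task runs.
	 */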

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
	/*
	 * If the kgdb is connected no debug regs should be altered. This
	 * is only applicable when KGDB and a KGDB I/O module are built
	 * into the kernel and you are using early debugging with
	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
	 */
	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	else {
#endif
	/*
	 * Clear all 6 debug registers:
	 */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
	/* If the kgdb is connected no debug regs should be altered. */
	}
#endif

	fpu_init();
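
	/*
	 * Take a snapshot of this CPU's flags; the 64bit entry code uses
	 * kernel_eflags as the initial EFLAGS of newly forked tasks
	 * (see ret_from_fork).
	 */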
	raw_local_save_flags(kernel_eflags);
}