/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/idle.h>
#include <asm/syscalls.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
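
/*
 * Example (sketch): code elsewhere in the kernel can watch idle
 * transitions by registering a notifier_block; the names below are
 * hypothetical and only illustrate the calling convention.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			;	// CPU is entering idle
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */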

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

static inline void play_dead(void)
{
	/*
	 * Without CPU hotplug support an online CPU can never be taken
	 * offline, so reaching this is a bug.
	 */
	BUG();
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the PDA stack
	 * canary up for us - and if we are the boot CPU we have
	 * a 0 stack canary. This is a good place for updating
	 * it, as we won't ever return from this function (so the
	 * invalid canaries already on the stack won't ever
	 * trigger):
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
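
/*
 * Note: pm_idle is a function pointer chosen at boot (e.g. a halt- or
 * mwait-based routine); cpu_idle() above just wraps whichever low-power
 * routine was selected, bracketing it with the idle notifiers.
 */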

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
			regs->ax, regs->bx, regs->cx);
	printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
			regs->dx, regs->si, regs->di);
	printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
			regs->bp, regs->r8, regs->r9);
	printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
			regs->r10, regs->r11, regs->r12);
	printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
			regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_INFO "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
			fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_INFO "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}
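
/*
 * Note: __show_regs() reads the control and debug registers directly
 * from the current CPU (they are not part of pt_regs), and only dumps
 * them when "all" is set.
 */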

void show_regs(struct pt_regs *regs)
{
	printk(KERN_INFO "CPU %d:", smp_processor_id());
	__show_regs(regs, 1);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
#ifdef CONFIG_X86_DS
	/* Free any DS contexts that have not been properly released. */
	if (unlikely(t->ds_ctx)) {
		/* we clear debugctl to make sure DS is not used. */
		update_debugctlmsr(0);
		ds_free(t->ds_ctx);
	}
#endif /* CONFIG_X86_DS */
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
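
/*
 * Helpers for the "small base" fast path used by do_arch_prctl() below:
 * an FS/GS base that fits in 32 bits can be installed as a GDT TLS
 * descriptor instead of being written to the base MSRs.
 */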
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr	= addr,
		.limit		= 0xfffff,
		.seg_32bit	= 1,
		.limit_in_pages	= 1,
		.useable	= 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
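
/*
 * Note: the child's pt_regs frame sits at the very top of its kernel
 * stack; ret_from_fork (declared above) unwinds straight into it, which
 * is how the child "returns" from fork with ax == 0.
 */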

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	write_pda(oldrsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = X86_EFLAGS_IF;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
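
/*
 * start_thread() is called by the binfmt loaders (e.g. the ELF loader)
 * once a new image is set up: it points the saved register frame at the
 * fresh entry point and user stack, so the return to user space lands
 * in the new program.
 */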

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val = PR_TSC_ENABLE;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
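
/*
 * Example (sketch): user space reaches the functions above through
 * prctl(2), e.g.
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);
 *
 * after which a rdtsc instruction in that task raises SIGSEGV, because
 * CR4.TSD is kept set whenever the task is running.
 */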

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;

#ifdef CONFIG_X86_DS
	{
		unsigned long ds_prev = 0, ds_next = 0;

		if (prev->ds_ctx)
			ds_prev = (unsigned long)prev->ds_ctx->ds;
		if (next->ds_ctx)
			ds_next = (unsigned long)next->ds_ctx->ds;

		if (ds_next != ds_prev) {
			/*
			 * We clear debugctl to make sure DS
			 * is not in use when we change it:
			 */
			debugctl = 0;
			update_debugctlmsr(0);
			wrmsrl(MSR_IA32_DS_AREA, ds_next);
		}
	}
#endif /* CONFIG_X86_DS */

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}

#ifdef CONFIG_X86_PTRACE_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif /* CONFIG_X86_PTRACE_BTS */
}
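
/*
 * Note: __switch_to() below calls __switch_to_xtra() only when prev or
 * next has one of the _TIF_WORK_CTXSW flags set, keeping this slow path
 * (debug registers, TSC mode, I/O bitmaps) off the common switch path.
 */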

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
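	/*
	 * (gcc emits %gs:40 accesses for the stack-protector cookie on
	 * x86-64, which is why the canary must sit at that fixed offset.)
	 */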
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now.
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/*
	 * TBD: this overwrites the user's setup. It should use two bits.
	 * But 64bit processes have always behaved this way, so it's not
	 * too bad. The main problem is just that 32bit children are
	 * affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite non-obvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
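
/*
 * get_wchan() walks saved frame pointers to find the first
 * non-scheduler function the task is blocked in; this is what shows up
 * in /proc/<pid>/wchan. The walk is bounded (16 frames) and bails out
 * as soon as a frame pointer leaves the task's kernel stack.
 */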

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/*
		 * handle small bases via the GDT because that's faster to
		 * switch.
		 */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/*
		 * Not strictly needed for fs, but do it for symmetry
		 * with gs.
		 */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/*
		 * handle small bases via the GDT because that's faster to
		 * switch.
		 */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/*
				 * set the selector to 0 to not confuse
				 * __switch_to.
				 */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;

		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
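
/*
 * Example (sketch): threading libraries use this to set up per-thread
 * data on x86-64, e.g.
 *
 *	arch_prctl(ARCH_SET_FS, (unsigned long)tls_block);
 *
 * where "tls_block" is the caller's thread-local storage area; bases
 * at or below 0xffffffff take the faster GDT path above.
 */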

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;

	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}