/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
/*
 * Make sure the floating-point register state in
 * the thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
                         * the FP register state on context switch on SMP,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
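
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * that wants a task's FP state in the thread_struct, as the core-dump
 * path below does, flushes first and then copies:
 *
 *      flush_fp_to_thread(tsk);
 *      memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
 */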
void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
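
/*
 * Minimal usage sketch (hypothetical caller, not from this file): kernel
 * code that wants to execute FP instructions must hold off preemption
 * around enable_kernel_fp(), since the MSR_FP enable is per-CPU:
 *
 *      preempt_disable();
 *      enable_kernel_fp();
 *      ... FP instructions ...
 *      preempt_enable();
 */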
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
#ifdef CONFIG_VSX
        elf_fpreg_t *reg;
        int i;
#endif
        if (!tsk->thread.regs)
                return 0;
        flush_fp_to_thread(current);
#ifdef CONFIG_VSX
        reg = (elf_fpreg_t *)fpregs;
        for (i = 0; i < ELF_NFPREG - 1; i++, reg++)
                *reg = tsk->thread.TS_FPR(i);
        memcpy(reg, &tsk->thread.fpscr, sizeof(elf_fpreg_t));
#else
        memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
#endif
        return 1;
}
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec(NULL);   /* just enable AltiVec for kernel - force */
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in
 * the thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}

int dump_task_altivec(struct task_struct *tsk, elf_vrreg_t *vrregs)
{
        /* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
         * separately, see below */
        const int nregs = ELF_NVRREG - 2;
        elf_vrreg_t *reg;
        u32 *dest;

        if (tsk == current)
                flush_altivec_to_thread(tsk);

        reg = (elf_vrreg_t *)vrregs;

        /* copy the 32 vr registers */
        memcpy(reg, &tsk->thread.vr[0], nregs * sizeof(*reg));
        reg += nregs;

        /* copy the vscr */
        memcpy(reg, &tsk->thread.vscr, sizeof(*reg));
        reg++;

        /* vrsave is stored in the high 32bit slot of the final 128bits */
        memset(reg, 0, sizeof(*reg));
        dest = (u32 *)reg;
        *dest = tsk->thread.vrsave;

        return 1;
}
#endif /* CONFIG_ALTIVEC */
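
/*
 * For reference (derived from the code above, not from the original
 * file): the register block dump_task_altivec() produces is laid out as
 * ELF_NVRREG slots of 128 bits each:
 *
 *      vrregs[0..31]   VR0..VR31
 *      vrregs[32]      VSCR
 *      vrregs[33]      VRSAVE in the high 32 bits, rest zeroed
 */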
#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);       /* just enable vsx for kernel - force */
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}

/*
 * This dumps the lower 64 bits of each of the first 32 VSX registers.
 * This needs to be called together with dump_task_fpu and
 * dump_task_altivec to get all the VSX state.
 */
int dump_task_vsx(struct task_struct *tsk, elf_vrreg_t *vrregs)
{
        elf_vrreg_t *reg;
        double buf[32];
        int i;

        if (tsk == current)
                flush_vsx_to_thread(tsk);

        reg = (elf_vrreg_t *)vrregs;

        for (i = 0; i < 32; i++)
                buf[i] = tsk->thread.fpr[i][TS_VSRLOWOFFSET];
        memcpy(reg, buf, sizeof(buf));

        return 1;
}
#endif /* CONFIG_VSX */
int dump_task_vector(struct task_struct *tsk, elf_vrregset_t *vrregs)
{
        int rc = 0;
        elf_vrreg_t *regs = (elf_vrreg_t *)vrregs;
#ifdef CONFIG_ALTIVEC
        rc = dump_task_altivec(tsk, regs);
        if (rc)
                return rc;
        regs += ELF_NVRREG;
#endif
#ifdef CONFIG_VSX
        rc = dump_task_vsx(tsk, regs);
#endif
        return rc;
}
#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}

int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
        flush_spe_to_thread(current);
        /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 32-bit words */
        memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
        return 1;
}
#endif /* CONFIG_SPE */
#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */
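
/*
 * A sketch of the lazy-switching protocol relied on above (assumed from
 * the uses in this file, not a quote of the low-level assembly): on !SMP,
 * the first FP/vector/SPE instruction after an exec or a giveup traps,
 * and the unavailable-facility handler both loads the registers and
 * records the owner, roughly
 *
 *      last_task_used_math = current;   (in the FP unavailable handler)
 *
 * discard_lazy_cpu_state() breaks that association so stale state is
 * never reloaded into the current task.
 */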
static DEFINE_PER_CPU(unsigned long, current_dabr);

int set_dabr(unsigned long dabr)
{
        __get_cpu_var(current_dabr) = dabr;

#ifdef CONFIG_PPC_MERGE         /* XXX for now */
        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr);
#endif

        /* XXX should we have a CPU_FTR_HAS_DABR ? */
#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
        mtspr(SPRN_DABR, dabr);
#endif
        return 0;
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
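
/*
 * Hedged example (bit names from asm/reg.h; this call does not appear in
 * this file): a DABR value combines a doubleword-aligned address with
 * match-control bits, so a write watchpoint on 'addr' would be set with
 * something like
 *
 *      set_dabr(addr | DABR_TRANSLATION | DABR_DATA_WRITE);
 */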
struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

        if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
                set_dabr(new->thread.dabr);

        new_thread = &new->thread;
        old_thread = &current->thread;

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                unsigned long start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif

        local_irq_save(flags);

        account_system_vtime(current);
        account_process_vtime(current);
        calculate_steal_time();

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync. Hard disable here.
         */
        hard_irq_disable();
        last = _switch(old_thread, new_thread);

        local_irq_restore(flags);

        return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");
#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif
                /* We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                    __get_user(instr, (unsigned int __user *)pc)) {
                        printk("XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk("<%08x> ", instr);
                        else
                                printk("%08x ", instr);
                }
                pc += sizeof(int);
        }
        printk("\n");
}
static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {0,             NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}
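
/*
 * Example (derived from the code above): printbits(regs->msr, msr_bits)
 * prints the set bits as a comma-separated list in angle brackets, e.g.
 *
 *      <EE,PR,FP,ME,IR,DR>
 */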
#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif
void show_regs(struct pt_regs *regs)
{
        int i, trap;

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
        trap = TRAP(regs);
        if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
        printk("TASK = %p[%d] '%s' THREAD: %p",
               current, task_pid_nr(current), current->comm,
               task_thread_info(current));

#ifdef CONFIG_SMP
        printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0; i < 32; i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\n" KERN_INFO "GPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Lookup NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP ["REG"] ", regs->nip);
        print_symbol("%s\n", regs->nip);
        printk("LR ["REG"] ", regs->link);
        print_symbol("%s\n", regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}
void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
#ifdef CONFIG_PPC64
        struct thread_info *t = current_thread_info();

        if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
                clear_ti_thread_flag(t, TIF_ABI_PENDING);
                if (test_ti_thread_flag(t, TIF_32BIT))
                        clear_ti_thread_flag(t, TIF_32BIT);
                else
                        set_ti_thread_flag(t, TIF_32BIT);
        }
#endif

        discard_lazy_cpu_state();

        if (current->thread.dabr) {
                current->thread.dabr = 0;
                set_dabr(0);
        }
}

void
release_thread(struct task_struct *t)
{
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_vsx_to_thread(current);
        flush_spe_to_thread(current);
}
/*
 * Copy a thread..
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                unsigned long unused, struct task_struct *p,
                struct pt_regs *regs)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        *childregs = *regs;
        if ((childregs->msr & MSR_PR) == 0) {
                /* for kernel thread, set `current' and stackptr in new task */
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
                childregs->gpr[2] = (unsigned long) p;
#else
                clear_tsk_thread_flag(p, TIF_32BIT);
#endif
                p->thread.regs = NULL;  /* no user register state */
        } else {
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!test_thread_flag(TIF_32BIT))
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }
        }
        childregs->gpr[3] = 0;  /* Result from fork() */
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some house keeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }

        /*
         * The PPC64 ABI makes use of a TOC to contain function
         * pointers.  The symbol (ret_from_fork) is actually a pointer
         * to the TOC entry.  The first entry is a pointer to the actual
         * function.
         */
        kregs->nip = *((unsigned long *)ret_from_fork);
#else
        kregs->nip = (unsigned long)ret_from_fork;
#endif

        return 0;
}
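
/*
 * For illustration (standard ppc64 ELFv1 ABI layout, not defined in this
 * file): a function descriptor is three doublewords, which is why the
 * dereference above yields the real entry point:
 *
 *      struct func_desc {
 *              unsigned long entry;    address of the code
 *              unsigned long toc;      TOC pointer for the callee
 *              unsigned long env;      environment pointer (unused by C)
 *      };
 */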
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        set_fs(USER_DS);

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!test_thread_flag(TIF_32BIT)) {
                unsigned long entry, toc;

                /* start is a relocated pointer to the function descriptor for
                 * the elf _start routine.  The first entry in the function
                 * descriptor is the entry address of _start and the second
                 * entry is the TOC value we need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /* Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
                if (load_addr != 0) {
                        entry += load_addr;
                        toc   += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif

        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /* On a CONFIG_SPE kernel this does not hurt us.  The bits that
         * __pack_fe01 uses do not overlap with bits used for
         * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
         * on CONFIG_SPE implementations are reserved so writing to
         * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}
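
/*
 * Userspace sketch (hypothetical, for illustration): this pair of
 * functions backs prctl(2), so a precise-mode request would look like
 *
 *      prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 */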
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}
int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}
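
/*
 * Userspace sketch (hypothetical): the endianness pair is likewise
 * driven through prctl(2), e.g.
 *
 *      prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG);
 */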
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
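
/*
 * Userspace sketch (hypothetical): alignment-fixup policy is set the
 * same way, e.g. to silence warnings about emulated unaligned accesses:
 *
 *      prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);
 */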
#define TRUNC_PTR(x)    ((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
              int __user *parent_tidp, void __user *child_threadptr,
              int __user *child_tidp, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
#ifdef CONFIG_PPC64
        if (test_thread_flag(TIF_32BIT)) {
                parent_tidp = TRUNC_PTR(parent_tidp);
                child_tidp = TRUNC_PTR(child_tidp);
        }
#endif
        return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
             unsigned long p4, unsigned long p5, unsigned long p6,
             struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
              unsigned long p4, unsigned long p5, unsigned long p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
                        regs, 0, NULL, NULL);
}
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
               unsigned long a3, unsigned long a4, unsigned long a5,
               struct pt_regs *regs)
{
        int error;
        char *filename;

        filename = getname((char __user *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_spe_to_thread(current);
        error = do_execve(filename, (char __user * __user *) a1,
                          (char __user * __user *) a2, regs);
        putname(filename);
out:
        return error;
}
#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which is in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}
#else
#define valid_irq_stack(sp, p, nb)      0
#endif /* CONFIG_IRQSTACKS */
int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}
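
/*
 * Frame layout assumed by the walk above (standard powerpc stack frame):
 *
 *      sp[0]                    back chain (caller's stack pointer)
 *      sp[STACK_FRAME_LR_SAVE]  saved LR, i.e. the return address
 *
 * so following sp[0] and reading the LR save slot reconstructs the call
 * chain without reporting scheduler-internal functions.
 */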
static int kstack_depth_to_print = 64;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] ", sp, ip);
                        print_symbol("%s", ip);
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        printk("--- Exception: %lx", regs->trap);
                        print_symbol(" at %s\n", regs->nip);
                        lr = regs->link;
                        print_symbol(" LR = %s\n", lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}
void dump_stack(void)
{
        show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
        unsigned long ctrl;

        if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
                HMT_medium();

                ctrl = mfspr(SPRN_CTRLF);
                ctrl |= CTRL_RUNLATCH;
                mtspr(SPRN_CTRLT, ctrl);

                set_thread_flag(TIF_RUNLATCH);
        }
}

void ppc64_runlatch_off(void)
{
        unsigned long ctrl;

        if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
                HMT_medium();

                clear_thread_flag(TIF_RUNLATCH);

                ctrl = mfspr(SPRN_CTRLF);
                ctrl &= ~CTRL_RUNLATCH;
                mtspr(SPRN_CTRLT, ctrl);
        }
}
#endif
#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
        struct thread_info *ti;

        ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
        if (unlikely(ti == NULL))
                return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
        memset(ti, 0, THREAD_SIZE);
#endif
        return ti;
}

void free_thread_info(struct thread_info *ti)
{
        kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
        BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */