xref: /linux/arch/powerpc/kernel/process.c (revision 58254e1002a82eb383c5977ad9fd5a451b91fe29)
114cf11afSPaul Mackerras /*
214cf11afSPaul Mackerras  *  Derived from "arch/i386/kernel/process.c"
314cf11afSPaul Mackerras  *    Copyright (C) 1995  Linus Torvalds
414cf11afSPaul Mackerras  *
514cf11afSPaul Mackerras  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
614cf11afSPaul Mackerras  *  Paul Mackerras (paulus@cs.anu.edu.au)
714cf11afSPaul Mackerras  *
814cf11afSPaul Mackerras  *  PowerPC version
914cf11afSPaul Mackerras  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
1014cf11afSPaul Mackerras  *
1114cf11afSPaul Mackerras  *  This program is free software; you can redistribute it and/or
1214cf11afSPaul Mackerras  *  modify it under the terms of the GNU General Public License
1314cf11afSPaul Mackerras  *  as published by the Free Software Foundation; either version
1414cf11afSPaul Mackerras  *  2 of the License, or (at your option) any later version.
1514cf11afSPaul Mackerras  */
1614cf11afSPaul Mackerras 
1714cf11afSPaul Mackerras #include <linux/errno.h>
1814cf11afSPaul Mackerras #include <linux/sched.h>
1914cf11afSPaul Mackerras #include <linux/kernel.h>
2014cf11afSPaul Mackerras #include <linux/mm.h>
2114cf11afSPaul Mackerras #include <linux/smp.h>
2214cf11afSPaul Mackerras #include <linux/stddef.h>
2314cf11afSPaul Mackerras #include <linux/unistd.h>
2414cf11afSPaul Mackerras #include <linux/ptrace.h>
2514cf11afSPaul Mackerras #include <linux/slab.h>
2614cf11afSPaul Mackerras #include <linux/user.h>
2714cf11afSPaul Mackerras #include <linux/elf.h>
2814cf11afSPaul Mackerras #include <linux/init.h>
2914cf11afSPaul Mackerras #include <linux/prctl.h>
3014cf11afSPaul Mackerras #include <linux/init_task.h>
314b16f8e2SPaul Gortmaker #include <linux/export.h>
3214cf11afSPaul Mackerras #include <linux/kallsyms.h>
3314cf11afSPaul Mackerras #include <linux/mqueue.h>
3414cf11afSPaul Mackerras #include <linux/hardirq.h>
3506d67d54SPaul Mackerras #include <linux/utsname.h>
366794c782SSteven Rostedt #include <linux/ftrace.h>
3779741dd3SMartin Schwidefsky #include <linux/kernel_stat.h>
38d839088cSAnton Blanchard #include <linux/personality.h>
39d839088cSAnton Blanchard #include <linux/random.h>
405aae8a53SK.Prasad #include <linux/hw_breakpoint.h>
4114cf11afSPaul Mackerras 
4214cf11afSPaul Mackerras #include <asm/pgtable.h>
4314cf11afSPaul Mackerras #include <asm/uaccess.h>
4414cf11afSPaul Mackerras #include <asm/io.h>
4514cf11afSPaul Mackerras #include <asm/processor.h>
4614cf11afSPaul Mackerras #include <asm/mmu.h>
4714cf11afSPaul Mackerras #include <asm/prom.h>
4876032de8SMichael Ellerman #include <asm/machdep.h>
49c6622f63SPaul Mackerras #include <asm/time.h>
50ae3a197eSDavid Howells #include <asm/runlatch.h>
51a7f31841SArnd Bergmann #include <asm/syscalls.h>
52ae3a197eSDavid Howells #include <asm/switch_to.h>
53ae3a197eSDavid Howells #include <asm/debug.h>
5406d67d54SPaul Mackerras #ifdef CONFIG_PPC64
5506d67d54SPaul Mackerras #include <asm/firmware.h>
5606d67d54SPaul Mackerras #endif
57d6a61bfcSLuis Machado #include <linux/kprobes.h>
58d6a61bfcSLuis Machado #include <linux/kdebug.h>
5914cf11afSPaul Mackerras 
6014cf11afSPaul Mackerras extern unsigned long _get_SP(void);
6114cf11afSPaul Mackerras 
6214cf11afSPaul Mackerras #ifndef CONFIG_SMP
6314cf11afSPaul Mackerras struct task_struct *last_task_used_math = NULL;
6414cf11afSPaul Mackerras struct task_struct *last_task_used_altivec = NULL;
65ce48b210SMichael Neuling struct task_struct *last_task_used_vsx = NULL;
6614cf11afSPaul Mackerras struct task_struct *last_task_used_spe = NULL;
6714cf11afSPaul Mackerras #endif
6814cf11afSPaul Mackerras 
6914cf11afSPaul Mackerras /*
7014cf11afSPaul Mackerras  * Make sure the floating-point register state in
7114cf11afSPaul Mackerras  * the thread_struct is up to date for task tsk.
7214cf11afSPaul Mackerras  */
7314cf11afSPaul Mackerras void flush_fp_to_thread(struct task_struct *tsk)
7414cf11afSPaul Mackerras {
7514cf11afSPaul Mackerras 	if (tsk->thread.regs) {
7614cf11afSPaul Mackerras 		/*
7714cf11afSPaul Mackerras 		 * We need to disable preemption here because if we didn't,
7814cf11afSPaul Mackerras 		 * another process could get scheduled after the regs->msr
7914cf11afSPaul Mackerras 		 * test but before we have finished saving the FP registers
8014cf11afSPaul Mackerras 		 * to the thread_struct.  That process could take over the
8114cf11afSPaul Mackerras 		 * FPU, and then when we get scheduled again we would store
8214cf11afSPaul Mackerras 		 * bogus values for the remaining FP registers.
8314cf11afSPaul Mackerras 		 */
8414cf11afSPaul Mackerras 		preempt_disable();
8514cf11afSPaul Mackerras 		if (tsk->thread.regs->msr & MSR_FP) {
8614cf11afSPaul Mackerras #ifdef CONFIG_SMP
8714cf11afSPaul Mackerras 			/*
8814cf11afSPaul Mackerras 			 * This should only ever be called for current or
8914cf11afSPaul Mackerras 			 * for a stopped child process.  Since we save away
9014cf11afSPaul Mackerras 			 * the FP register state on context switch on SMP,
9114cf11afSPaul Mackerras 			 * there is something wrong if a stopped child appears
9214cf11afSPaul Mackerras 			 * to still have its FP state in the CPU registers.
9314cf11afSPaul Mackerras 			 */
9414cf11afSPaul Mackerras 			BUG_ON(tsk != current);
9514cf11afSPaul Mackerras #endif
960ee6c15eSKumar Gala 			giveup_fpu(tsk);
9714cf11afSPaul Mackerras 		}
9814cf11afSPaul Mackerras 		preempt_enable();
9914cf11afSPaul Mackerras 	}
10014cf11afSPaul Mackerras }
101de56a948SPaul Mackerras EXPORT_SYMBOL_GPL(flush_fp_to_thread);
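/*
 * Illustrative caller sketch (not from this file; the ptrace and
 * coredump paths are the real users): flush the live FP state into
 * the thread_struct before reading it from memory.  "target" and
 * "buf" here are hypothetical names:
 *
 *	flush_fp_to_thread(target);
 *	memcpy(buf, target->thread.fpr, sizeof(target->thread.fpr));
 */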
10214cf11afSPaul Mackerras 
10314cf11afSPaul Mackerras void enable_kernel_fp(void)
10414cf11afSPaul Mackerras {
10514cf11afSPaul Mackerras 	WARN_ON(preemptible());
10614cf11afSPaul Mackerras 
10714cf11afSPaul Mackerras #ifdef CONFIG_SMP
10814cf11afSPaul Mackerras 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
10914cf11afSPaul Mackerras 		giveup_fpu(current);
11014cf11afSPaul Mackerras 	else
11114cf11afSPaul Mackerras 		giveup_fpu(NULL);	/* just enables FP for kernel */
11214cf11afSPaul Mackerras #else
11314cf11afSPaul Mackerras 	giveup_fpu(last_task_used_math);
11414cf11afSPaul Mackerras #endif /* CONFIG_SMP */
11514cf11afSPaul Mackerras }
11614cf11afSPaul Mackerras EXPORT_SYMBOL(enable_kernel_fp);
11714cf11afSPaul Mackerras 
11814cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC
11914cf11afSPaul Mackerras void enable_kernel_altivec(void)
12014cf11afSPaul Mackerras {
12114cf11afSPaul Mackerras 	WARN_ON(preemptible());
12214cf11afSPaul Mackerras 
12314cf11afSPaul Mackerras #ifdef CONFIG_SMP
12414cf11afSPaul Mackerras 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
12514cf11afSPaul Mackerras 		giveup_altivec(current);
12614cf11afSPaul Mackerras 	else
12735000870SAnton Blanchard 		giveup_altivec_notask();
12814cf11afSPaul Mackerras #else
12914cf11afSPaul Mackerras 	giveup_altivec(last_task_used_altivec);
13014cf11afSPaul Mackerras #endif /* CONFIG_SMP */
13114cf11afSPaul Mackerras }
13214cf11afSPaul Mackerras EXPORT_SYMBOL(enable_kernel_altivec);
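/*
 * Sketch of the expected calling pattern (illustrative; the RAID6
 * Altivec routines are one real user): kernel code that wants VMX
 * must run non-preemptibly, since nothing saves its vector state
 * across a reschedule:
 *
 *	preempt_disable();
 *	enable_kernel_altivec();
 *	... use Altivec registers ...
 *	preempt_enable();
 */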
13314cf11afSPaul Mackerras 
13414cf11afSPaul Mackerras /*
13514cf11afSPaul Mackerras  * Make sure the VMX/Altivec register state in
13614cf11afSPaul Mackerras  * the thread_struct is up to date for task tsk.
13714cf11afSPaul Mackerras  */
13814cf11afSPaul Mackerras void flush_altivec_to_thread(struct task_struct *tsk)
13914cf11afSPaul Mackerras {
14014cf11afSPaul Mackerras 	if (tsk->thread.regs) {
14114cf11afSPaul Mackerras 		preempt_disable();
14214cf11afSPaul Mackerras 		if (tsk->thread.regs->msr & MSR_VEC) {
14314cf11afSPaul Mackerras #ifdef CONFIG_SMP
14414cf11afSPaul Mackerras 			BUG_ON(tsk != current);
14514cf11afSPaul Mackerras #endif
1460ee6c15eSKumar Gala 			giveup_altivec(tsk);
14714cf11afSPaul Mackerras 		}
14814cf11afSPaul Mackerras 		preempt_enable();
14914cf11afSPaul Mackerras 	}
15014cf11afSPaul Mackerras }
151de56a948SPaul Mackerras EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
15214cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */
15314cf11afSPaul Mackerras 
154ce48b210SMichael Neuling #ifdef CONFIG_VSX
155ce48b210SMichael Neuling #if 0
156ce48b210SMichael Neuling /* not currently used, but some crazy RAID module might want to later */
157ce48b210SMichael Neuling void enable_kernel_vsx(void)
158ce48b210SMichael Neuling {
159ce48b210SMichael Neuling 	WARN_ON(preemptible());
160ce48b210SMichael Neuling 
161ce48b210SMichael Neuling #ifdef CONFIG_SMP
162ce48b210SMichael Neuling 	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
163ce48b210SMichael Neuling 		giveup_vsx(current);
164ce48b210SMichael Neuling 	else
165ce48b210SMichael Neuling 		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
166ce48b210SMichael Neuling #else
167ce48b210SMichael Neuling 	giveup_vsx(last_task_used_vsx);
168ce48b210SMichael Neuling #endif /* CONFIG_SMP */
169ce48b210SMichael Neuling }
170ce48b210SMichael Neuling EXPORT_SYMBOL(enable_kernel_vsx);
171ce48b210SMichael Neuling #endif
172ce48b210SMichael Neuling 
1737c292170SMichael Neuling void giveup_vsx(struct task_struct *tsk)
1747c292170SMichael Neuling {
1757c292170SMichael Neuling 	giveup_fpu(tsk);
1767c292170SMichael Neuling 	giveup_altivec(tsk);
1777c292170SMichael Neuling 	__giveup_vsx(tsk);
1787c292170SMichael Neuling }
1797c292170SMichael Neuling 
180ce48b210SMichael Neuling void flush_vsx_to_thread(struct task_struct *tsk)
181ce48b210SMichael Neuling {
182ce48b210SMichael Neuling 	if (tsk->thread.regs) {
183ce48b210SMichael Neuling 		preempt_disable();
184ce48b210SMichael Neuling 		if (tsk->thread.regs->msr & MSR_VSX) {
185ce48b210SMichael Neuling #ifdef CONFIG_SMP
186ce48b210SMichael Neuling 			BUG_ON(tsk != current);
187ce48b210SMichael Neuling #endif
188ce48b210SMichael Neuling 			giveup_vsx(tsk);
189ce48b210SMichael Neuling 		}
190ce48b210SMichael Neuling 		preempt_enable();
191ce48b210SMichael Neuling 	}
192ce48b210SMichael Neuling }
193de56a948SPaul Mackerras EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
194ce48b210SMichael Neuling #endif /* CONFIG_VSX */
195ce48b210SMichael Neuling 
19614cf11afSPaul Mackerras #ifdef CONFIG_SPE
19714cf11afSPaul Mackerras 
19814cf11afSPaul Mackerras void enable_kernel_spe(void)
19914cf11afSPaul Mackerras {
20014cf11afSPaul Mackerras 	WARN_ON(preemptible());
20114cf11afSPaul Mackerras 
20214cf11afSPaul Mackerras #ifdef CONFIG_SMP
20314cf11afSPaul Mackerras 	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
20414cf11afSPaul Mackerras 		giveup_spe(current);
20514cf11afSPaul Mackerras 	else
20614cf11afSPaul Mackerras 		giveup_spe(NULL);	/* just enable SPE for kernel - force */
20714cf11afSPaul Mackerras #else
20814cf11afSPaul Mackerras 	giveup_spe(last_task_used_spe);
20914cf11afSPaul Mackerras #endif /* CONFIG_SMP */
21014cf11afSPaul Mackerras }
21114cf11afSPaul Mackerras EXPORT_SYMBOL(enable_kernel_spe);
21214cf11afSPaul Mackerras 
21314cf11afSPaul Mackerras void flush_spe_to_thread(struct task_struct *tsk)
21414cf11afSPaul Mackerras {
21514cf11afSPaul Mackerras 	if (tsk->thread.regs) {
21614cf11afSPaul Mackerras 		preempt_disable();
21714cf11afSPaul Mackerras 		if (tsk->thread.regs->msr & MSR_SPE) {
21814cf11afSPaul Mackerras #ifdef CONFIG_SMP
21914cf11afSPaul Mackerras 			BUG_ON(tsk != current);
22014cf11afSPaul Mackerras #endif
221685659eeSyu liu 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
2220ee6c15eSKumar Gala 			giveup_spe(tsk);
22314cf11afSPaul Mackerras 		}
22414cf11afSPaul Mackerras 		preempt_enable();
22514cf11afSPaul Mackerras 	}
22614cf11afSPaul Mackerras }
22714cf11afSPaul Mackerras #endif /* CONFIG_SPE */
22814cf11afSPaul Mackerras 
2295388fb10SPaul Mackerras #ifndef CONFIG_SMP
23048abec07SPaul Mackerras /*
23148abec07SPaul Mackerras  * If we are doing lazy switching of CPU state (FP, altivec or SPE),
23248abec07SPaul Mackerras  * and the current task has some state, discard it.
23348abec07SPaul Mackerras  */
2345388fb10SPaul Mackerras void discard_lazy_cpu_state(void)
23548abec07SPaul Mackerras {
23648abec07SPaul Mackerras 	preempt_disable();
23748abec07SPaul Mackerras 	if (last_task_used_math == current)
23848abec07SPaul Mackerras 		last_task_used_math = NULL;
23948abec07SPaul Mackerras #ifdef CONFIG_ALTIVEC
24048abec07SPaul Mackerras 	if (last_task_used_altivec == current)
24148abec07SPaul Mackerras 		last_task_used_altivec = NULL;
24248abec07SPaul Mackerras #endif /* CONFIG_ALTIVEC */
243ce48b210SMichael Neuling #ifdef CONFIG_VSX
244ce48b210SMichael Neuling 	if (last_task_used_vsx == current)
245ce48b210SMichael Neuling 		last_task_used_vsx = NULL;
246ce48b210SMichael Neuling #endif /* CONFIG_VSX */
24748abec07SPaul Mackerras #ifdef CONFIG_SPE
24848abec07SPaul Mackerras 	if (last_task_used_spe == current)
24948abec07SPaul Mackerras 		last_task_used_spe = NULL;
25048abec07SPaul Mackerras #endif
25148abec07SPaul Mackerras 	preempt_enable();
25248abec07SPaul Mackerras }
2535388fb10SPaul Mackerras #endif /* CONFIG_SMP */
25448abec07SPaul Mackerras 
2553bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2563bffb652SDave Kleikamp void do_send_trap(struct pt_regs *regs, unsigned long address,
2573bffb652SDave Kleikamp 		  unsigned long error_code, int signal_code, int breakpt)
2583bffb652SDave Kleikamp {
2593bffb652SDave Kleikamp 	siginfo_t info;
2603bffb652SDave Kleikamp 
2613bffb652SDave Kleikamp 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
2623bffb652SDave Kleikamp 			11, SIGSEGV) == NOTIFY_STOP)
2633bffb652SDave Kleikamp 		return;
2643bffb652SDave Kleikamp 
2653bffb652SDave Kleikamp 	/* Deliver the signal to userspace */
2663bffb652SDave Kleikamp 	info.si_signo = SIGTRAP;
2673bffb652SDave Kleikamp 	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
2683bffb652SDave Kleikamp 	info.si_code = signal_code;
2693bffb652SDave Kleikamp 	info.si_addr = (void __user *)address;
2703bffb652SDave Kleikamp 	force_sig_info(SIGTRAP, &info, current);
2713bffb652SDave Kleikamp }
2723bffb652SDave Kleikamp #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
273d6a61bfcSLuis Machado void do_dabr(struct pt_regs *regs, unsigned long address,
274d6a61bfcSLuis Machado 		    unsigned long error_code)
275d6a61bfcSLuis Machado {
276d6a61bfcSLuis Machado 	siginfo_t info;
277d6a61bfcSLuis Machado 
278d6a61bfcSLuis Machado 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
279d6a61bfcSLuis Machado 			11, SIGSEGV) == NOTIFY_STOP)
280d6a61bfcSLuis Machado 		return;
281d6a61bfcSLuis Machado 
282d6a61bfcSLuis Machado 	if (debugger_dabr_match(regs))
283d6a61bfcSLuis Machado 		return;
284d6a61bfcSLuis Machado 
285d6a61bfcSLuis Machado 	/* Clear the DABR */
286d6a61bfcSLuis Machado 	set_dabr(0);
287d6a61bfcSLuis Machado 
288d6a61bfcSLuis Machado 	/* Deliver the signal to userspace */
289d6a61bfcSLuis Machado 	info.si_signo = SIGTRAP;
290d6a61bfcSLuis Machado 	info.si_errno = 0;
291d6a61bfcSLuis Machado 	info.si_code = TRAP_HWBKPT;
292d6a61bfcSLuis Machado 	info.si_addr = (void __user *)address;
293d6a61bfcSLuis Machado 	force_sig_info(SIGTRAP, &info, current);
294d6a61bfcSLuis Machado }
2953bffb652SDave Kleikamp #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
296d6a61bfcSLuis Machado 
297a2ceff5eSMichael Ellerman static DEFINE_PER_CPU(unsigned long, current_dabr);
298a2ceff5eSMichael Ellerman 
2993bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3003bffb652SDave Kleikamp /*
3013bffb652SDave Kleikamp  * Set the debug registers back to their default "safe" values.
3023bffb652SDave Kleikamp  */
3033bffb652SDave Kleikamp static void set_debug_reg_defaults(struct thread_struct *thread)
3043bffb652SDave Kleikamp {
3053bffb652SDave Kleikamp 	thread->iac1 = thread->iac2 = 0;
3063bffb652SDave Kleikamp #if CONFIG_PPC_ADV_DEBUG_IACS > 2
3073bffb652SDave Kleikamp 	thread->iac3 = thread->iac4 = 0;
3083bffb652SDave Kleikamp #endif
3093bffb652SDave Kleikamp 	thread->dac1 = thread->dac2 = 0;
3103bffb652SDave Kleikamp #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
3113bffb652SDave Kleikamp 	thread->dvc1 = thread->dvc2 = 0;
3123bffb652SDave Kleikamp #endif
3133bffb652SDave Kleikamp 	thread->dbcr0 = 0;
3143bffb652SDave Kleikamp #ifdef CONFIG_BOOKE
3153bffb652SDave Kleikamp 	/*
3163bffb652SDave Kleikamp 	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
3173bffb652SDave Kleikamp 	 */
3183bffb652SDave Kleikamp 	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |	\
3193bffb652SDave Kleikamp 			DBCR1_IAC3US | DBCR1_IAC4US;
3203bffb652SDave Kleikamp 	/*
3213bffb652SDave Kleikamp 	 * Force Data Address Compare User/Supervisor bits to be User-only
3223bffb652SDave Kleikamp 	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
3233bffb652SDave Kleikamp 	 */
3243bffb652SDave Kleikamp 	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
3253bffb652SDave Kleikamp #else
3263bffb652SDave Kleikamp 	thread->dbcr1 = 0;
3273bffb652SDave Kleikamp #endif
3283bffb652SDave Kleikamp }
3293bffb652SDave Kleikamp 
3303bffb652SDave Kleikamp static void prime_debug_regs(struct thread_struct *thread)
3313bffb652SDave Kleikamp {
3323bffb652SDave Kleikamp 	mtspr(SPRN_IAC1, thread->iac1);
3333bffb652SDave Kleikamp 	mtspr(SPRN_IAC2, thread->iac2);
3343bffb652SDave Kleikamp #if CONFIG_PPC_ADV_DEBUG_IACS > 2
3353bffb652SDave Kleikamp 	mtspr(SPRN_IAC3, thread->iac3);
3363bffb652SDave Kleikamp 	mtspr(SPRN_IAC4, thread->iac4);
3373bffb652SDave Kleikamp #endif
3383bffb652SDave Kleikamp 	mtspr(SPRN_DAC1, thread->dac1);
3393bffb652SDave Kleikamp 	mtspr(SPRN_DAC2, thread->dac2);
3403bffb652SDave Kleikamp #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
3413bffb652SDave Kleikamp 	mtspr(SPRN_DVC1, thread->dvc1);
3423bffb652SDave Kleikamp 	mtspr(SPRN_DVC2, thread->dvc2);
3433bffb652SDave Kleikamp #endif
3443bffb652SDave Kleikamp 	mtspr(SPRN_DBCR0, thread->dbcr0);
3453bffb652SDave Kleikamp 	mtspr(SPRN_DBCR1, thread->dbcr1);
3463bffb652SDave Kleikamp #ifdef CONFIG_BOOKE
3473bffb652SDave Kleikamp 	mtspr(SPRN_DBCR2, thread->dbcr2);
3483bffb652SDave Kleikamp #endif
3493bffb652SDave Kleikamp }
3503bffb652SDave Kleikamp /*
3513bffb652SDave Kleikamp  * If either the old or the new thread is making use of the
3523bffb652SDave Kleikamp  * debug registers, load the debug registers with the values
3533bffb652SDave Kleikamp  * stored in the new thread.
3543bffb652SDave Kleikamp  */
3553bffb652SDave Kleikamp static void switch_booke_debug_regs(struct thread_struct *new_thread)
3563bffb652SDave Kleikamp {
3573bffb652SDave Kleikamp 	if ((current->thread.dbcr0 & DBCR0_IDM)
3583bffb652SDave Kleikamp 		|| (new_thread->dbcr0 & DBCR0_IDM))
3593bffb652SDave Kleikamp 			prime_debug_regs(new_thread);
3603bffb652SDave Kleikamp }
3613bffb652SDave Kleikamp #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
362e0780b72SK.Prasad #ifndef CONFIG_HAVE_HW_BREAKPOINT
3633bffb652SDave Kleikamp static void set_debug_reg_defaults(struct thread_struct *thread)
3643bffb652SDave Kleikamp {
3653bffb652SDave Kleikamp 	if (thread->dabr) {
3663bffb652SDave Kleikamp 		thread->dabr = 0;
3673bffb652SDave Kleikamp 		set_dabr(0);
3683bffb652SDave Kleikamp 	}
3693bffb652SDave Kleikamp }
370e0780b72SK.Prasad #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
3713bffb652SDave Kleikamp #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
3723bffb652SDave Kleikamp 
37314cf11afSPaul Mackerras int set_dabr(unsigned long dabr)
37414cf11afSPaul Mackerras {
375a2ceff5eSMichael Ellerman 	__get_cpu_var(current_dabr) = dabr;
376a2ceff5eSMichael Ellerman 
377cab0af98SMichael Ellerman 	if (ppc_md.set_dabr)
378cab0af98SMichael Ellerman 		return ppc_md.set_dabr(dabr);
37914cf11afSPaul Mackerras 
380791cc501SBenjamin Herrenschmidt 	/* XXX should we have a CPU_FTR_HAS_DABR ? */
381172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
382c6c9eaceSBenjamin Herrenschmidt 	mtspr(SPRN_DAC1, dabr);
383221c185dSDave Kleikamp #ifdef CONFIG_PPC_47x
384221c185dSDave Kleikamp 	isync();
385221c185dSDave Kleikamp #endif
386c6c9eaceSBenjamin Herrenschmidt #elif defined(CONFIG_PPC_BOOK3S)
387cab0af98SMichael Ellerman 	mtspr(SPRN_DABR, dabr);
388791cc501SBenjamin Herrenschmidt #endif
389d6a61bfcSLuis Machado 
390d6a61bfcSLuis Machado 
391cab0af98SMichael Ellerman 	return 0;
39214cf11afSPaul Mackerras }
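/*
 * Illustrative value (DABR_TRANSLATION/DABR_DATA_* come from
 * asm/reg.h; "addr" is a hypothetical doubleword-aligned address):
 * watch addr for both reads and writes in virtual (translated) mode:
 *
 *	set_dabr((addr & ~0x7UL) | DABR_TRANSLATION |
 *		 DABR_DATA_READ | DABR_DATA_WRITE);
 */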
39314cf11afSPaul Mackerras 
39406d67d54SPaul Mackerras #ifdef CONFIG_PPC64
39506d67d54SPaul Mackerras DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
39606d67d54SPaul Mackerras #endif
39714cf11afSPaul Mackerras 
39814cf11afSPaul Mackerras struct task_struct *__switch_to(struct task_struct *prev,
39914cf11afSPaul Mackerras 	struct task_struct *new)
40014cf11afSPaul Mackerras {
40114cf11afSPaul Mackerras 	struct thread_struct *new_thread, *old_thread;
40214cf11afSPaul Mackerras 	unsigned long flags;
40314cf11afSPaul Mackerras 	struct task_struct *last;
404d6bf29b4SPeter Zijlstra #ifdef CONFIG_PPC_BOOK3S_64
405d6bf29b4SPeter Zijlstra 	struct ppc64_tlb_batch *batch;
406d6bf29b4SPeter Zijlstra #endif
40714cf11afSPaul Mackerras 
40814cf11afSPaul Mackerras #ifdef CONFIG_SMP
40914cf11afSPaul Mackerras 	/* avoid complexity of lazy save/restore of fpu
41014cf11afSPaul Mackerras 	 * by just saving it every time we switch out if
41114cf11afSPaul Mackerras 	 * this task used the fpu during the last quantum.
41214cf11afSPaul Mackerras 	 *
41314cf11afSPaul Mackerras 	 * If it tries to use the fpu again, it'll trap and
41414cf11afSPaul Mackerras 	 * reload its fp regs.  So we don't have to do a restore
41514cf11afSPaul Mackerras 	 * every switch, just a save.
41614cf11afSPaul Mackerras 	 *  -- Cort
41714cf11afSPaul Mackerras 	 */
41814cf11afSPaul Mackerras 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
41914cf11afSPaul Mackerras 		giveup_fpu(prev);
42014cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC
42114cf11afSPaul Mackerras 	/*
42214cf11afSPaul Mackerras 	 * If the previous thread used altivec in the last quantum
42314cf11afSPaul Mackerras 	 * (thus changing altivec regs) then save them.
42414cf11afSPaul Mackerras 	 * We used to check the VRSAVE register but not all apps
42514cf11afSPaul Mackerras 	 * set it, so we don't rely on it now (and in fact we need
42614cf11afSPaul Mackerras 	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
42714cf11afSPaul Mackerras 	 *
42814cf11afSPaul Mackerras 	 * On SMP we always save/restore altivec regs just to avoid the
42914cf11afSPaul Mackerras 	 * complexity of changing processors.
43014cf11afSPaul Mackerras 	 *  -- Cort
43114cf11afSPaul Mackerras 	 */
43214cf11afSPaul Mackerras 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
43314cf11afSPaul Mackerras 		giveup_altivec(prev);
43414cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */
435ce48b210SMichael Neuling #ifdef CONFIG_VSX
436ce48b210SMichael Neuling 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
4377c292170SMichael Neuling 		/* VMX and FPU registers are already saved here */
4387c292170SMichael Neuling 		__giveup_vsx(prev);
439ce48b210SMichael Neuling #endif /* CONFIG_VSX */
44014cf11afSPaul Mackerras #ifdef CONFIG_SPE
44114cf11afSPaul Mackerras 	/*
44214cf11afSPaul Mackerras 	 * If the previous thread used spe in the last quantum
44314cf11afSPaul Mackerras 	 * (thus changing spe regs) then save them.
44414cf11afSPaul Mackerras 	 *
44514cf11afSPaul Mackerras 	 * On SMP we always save/restore spe regs just to avoid the
44614cf11afSPaul Mackerras 	 * complexity of changing processors.
44714cf11afSPaul Mackerras 	 */
44814cf11afSPaul Mackerras 	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
44914cf11afSPaul Mackerras 		giveup_spe(prev);
450c0c0d996SPaul Mackerras #endif /* CONFIG_SPE */
451c0c0d996SPaul Mackerras 
452c0c0d996SPaul Mackerras #else  /* CONFIG_SMP */
453c0c0d996SPaul Mackerras #ifdef CONFIG_ALTIVEC
454c0c0d996SPaul Mackerras 	/* Avoid the trap.  On SMP this never happens since
455c0c0d996SPaul Mackerras 	 * we don't set last_task_used_altivec -- Cort
456c0c0d996SPaul Mackerras 	 */
457c0c0d996SPaul Mackerras 	if (new->thread.regs && last_task_used_altivec == new)
458c0c0d996SPaul Mackerras 		new->thread.regs->msr |= MSR_VEC;
459c0c0d996SPaul Mackerras #endif /* CONFIG_ALTIVEC */
460ce48b210SMichael Neuling #ifdef CONFIG_VSX
461ce48b210SMichael Neuling 	if (new->thread.regs && last_task_used_vsx == new)
462ce48b210SMichael Neuling 		new->thread.regs->msr |= MSR_VSX;
463ce48b210SMichael Neuling #endif /* CONFIG_VSX */
464c0c0d996SPaul Mackerras #ifdef CONFIG_SPE
46514cf11afSPaul Mackerras 	/* Avoid the trap.  On SMP this never happens since
46614cf11afSPaul Mackerras 	 * we don't set last_task_used_spe
46714cf11afSPaul Mackerras 	 */
46814cf11afSPaul Mackerras 	if (new->thread.regs && last_task_used_spe == new)
46914cf11afSPaul Mackerras 		new->thread.regs->msr |= MSR_SPE;
47014cf11afSPaul Mackerras #endif /* CONFIG_SPE */
471c0c0d996SPaul Mackerras 
47214cf11afSPaul Mackerras #endif /* CONFIG_SMP */
47314cf11afSPaul Mackerras 
474172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
4753bffb652SDave Kleikamp 	switch_booke_debug_regs(&new->thread);
476c6c9eaceSBenjamin Herrenschmidt #else
4775aae8a53SK.Prasad /*
4785aae8a53SK.Prasad  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
4795aae8a53SK.Prasad  * schedule DABR
4805aae8a53SK.Prasad  */
4815aae8a53SK.Prasad #ifndef CONFIG_HAVE_HW_BREAKPOINT
482c6c9eaceSBenjamin Herrenschmidt 	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
483c6c9eaceSBenjamin Herrenschmidt 		set_dabr(new->thread.dabr);
4845aae8a53SK.Prasad #endif /* CONFIG_HAVE_HW_BREAKPOINT */
485d6a61bfcSLuis Machado #endif
486d6a61bfcSLuis Machado 
487c6c9eaceSBenjamin Herrenschmidt 
48814cf11afSPaul Mackerras 	new_thread = &new->thread;
48914cf11afSPaul Mackerras 	old_thread = &current->thread;
49006d67d54SPaul Mackerras 
49106d67d54SPaul Mackerras #ifdef CONFIG_PPC64
49206d67d54SPaul Mackerras 	/*
49306d67d54SPaul Mackerras 	 * Collect processor utilization data per process
49406d67d54SPaul Mackerras 	 */
49506d67d54SPaul Mackerras 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
49606d67d54SPaul Mackerras 		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
49706d67d54SPaul Mackerras 		long unsigned start_tb, current_tb;
49806d67d54SPaul Mackerras 		start_tb = old_thread->start_tb;
49906d67d54SPaul Mackerras 		cu->current_tb = current_tb = mfspr(SPRN_PURR);
50006d67d54SPaul Mackerras 		old_thread->accum_tb += (current_tb - start_tb);
50106d67d54SPaul Mackerras 		new_thread->start_tb = current_tb;
50206d67d54SPaul Mackerras 	}
503d6bf29b4SPeter Zijlstra #endif /* CONFIG_PPC64 */
504d6bf29b4SPeter Zijlstra 
505d6bf29b4SPeter Zijlstra #ifdef CONFIG_PPC_BOOK3S_64
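	/*
	 * The lazy hash-PTE invalidate batch is a per-cpu structure, so
	 * it must not stay active across _switch(): flush anything
	 * pending and deactivate it here, and note the fact in
	 * _TLF_LAZY_MMU so the matching block after _switch() below can
	 * re-activate it when this thread is switched back in.
	 */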
506d6bf29b4SPeter Zijlstra 	batch = &__get_cpu_var(ppc64_tlb_batch);
507d6bf29b4SPeter Zijlstra 	if (batch->active) {
508d6bf29b4SPeter Zijlstra 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
509d6bf29b4SPeter Zijlstra 		if (batch->index)
510d6bf29b4SPeter Zijlstra 			__flush_tlb_pending(batch);
511d6bf29b4SPeter Zijlstra 		batch->active = 0;
512d6bf29b4SPeter Zijlstra 	}
513d6bf29b4SPeter Zijlstra #endif /* CONFIG_PPC_BOOK3S_64 */
51406d67d54SPaul Mackerras 
51514cf11afSPaul Mackerras 	local_irq_save(flags);
516c6622f63SPaul Mackerras 
517c6622f63SPaul Mackerras 	account_system_vtime(current);
51881a3843fSTony Breeds 	account_process_vtime(current);
519c6622f63SPaul Mackerras 
52044387e9fSAnton Blanchard 	/*
52144387e9fSAnton Blanchard 	 * We can't take a PMU exception inside _switch() since there is a
52244387e9fSAnton Blanchard 	 * window where the kernel stack SLB and the kernel stack are out
52344387e9fSAnton Blanchard 	 * of sync. Hard disable here.
52444387e9fSAnton Blanchard 	 */
52544387e9fSAnton Blanchard 	hard_irq_disable();
52614cf11afSPaul Mackerras 	last = _switch(old_thread, new_thread);
52714cf11afSPaul Mackerras 
528d6bf29b4SPeter Zijlstra #ifdef CONFIG_PPC_BOOK3S_64
529d6bf29b4SPeter Zijlstra 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
530d6bf29b4SPeter Zijlstra 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
531d6bf29b4SPeter Zijlstra 		batch = &__get_cpu_var(ppc64_tlb_batch);
532d6bf29b4SPeter Zijlstra 		batch->active = 1;
533d6bf29b4SPeter Zijlstra 	}
534d6bf29b4SPeter Zijlstra #endif /* CONFIG_PPC_BOOK3S_64 */
535d6bf29b4SPeter Zijlstra 
53614cf11afSPaul Mackerras 	local_irq_restore(flags);
53714cf11afSPaul Mackerras 
53814cf11afSPaul Mackerras 	return last;
53914cf11afSPaul Mackerras }
54014cf11afSPaul Mackerras 
54106d67d54SPaul Mackerras static int instructions_to_print = 16;
54206d67d54SPaul Mackerras 
54306d67d54SPaul Mackerras static void show_instructions(struct pt_regs *regs)
54406d67d54SPaul Mackerras {
54506d67d54SPaul Mackerras 	int i;
54606d67d54SPaul Mackerras 	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
54706d67d54SPaul Mackerras 			sizeof(int));
54806d67d54SPaul Mackerras 
54906d67d54SPaul Mackerras 	printk("Instruction dump:");
55006d67d54SPaul Mackerras 
55106d67d54SPaul Mackerras 	for (i = 0; i < instructions_to_print; i++) {
55206d67d54SPaul Mackerras 		int instr;
55306d67d54SPaul Mackerras 
55406d67d54SPaul Mackerras 		if (!(i % 8))
55506d67d54SPaul Mackerras 			printk("\n");
55606d67d54SPaul Mackerras 
5570de2d820SScott Wood #if !defined(CONFIG_BOOKE)
5580de2d820SScott Wood 		/* If executing with the IMMU off, adjust pc rather
5590de2d820SScott Wood 		 * than print XXXXXXXX.
5600de2d820SScott Wood 		 */
5610de2d820SScott Wood 		if (!(regs->msr & MSR_IR))
5620de2d820SScott Wood 			pc = (unsigned long)phys_to_virt(pc);
5630de2d820SScott Wood #endif
5640de2d820SScott Wood 
565af308377SStephen Rothwell 		/* We use __get_user here *only* to avoid an OOPS on a
566af308377SStephen Rothwell 		 * bad address because the pc *should* only be a
567af308377SStephen Rothwell 		 * kernel address.
568af308377SStephen Rothwell 		 */
56900ae36deSAnton Blanchard 		if (!__kernel_text_address(pc) ||
57000ae36deSAnton Blanchard 		     __get_user(instr, (unsigned int __user *)pc)) {
57140c8cefaSIra Snyder 			printk(KERN_CONT "XXXXXXXX ");
57206d67d54SPaul Mackerras 		} else {
57306d67d54SPaul Mackerras 			if (regs->nip == pc)
57440c8cefaSIra Snyder 				printk(KERN_CONT "<%08x> ", instr);
57506d67d54SPaul Mackerras 			else
57640c8cefaSIra Snyder 				printk(KERN_CONT "%08x ", instr);
57706d67d54SPaul Mackerras 		}
57806d67d54SPaul Mackerras 
57906d67d54SPaul Mackerras 		pc += sizeof(int);
58006d67d54SPaul Mackerras 	}
58106d67d54SPaul Mackerras 
58206d67d54SPaul Mackerras 	printk("\n");
58306d67d54SPaul Mackerras }
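/*
 * With instructions_to_print = 16, the loop above dumps the 12 words
 * (16 * 3 / 4) preceding regs->nip followed by the 4 words starting
 * at regs->nip, and prints the word at regs->nip itself as <xxxxxxxx>.
 */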
58406d67d54SPaul Mackerras 
58506d67d54SPaul Mackerras static struct regbit {
58606d67d54SPaul Mackerras 	unsigned long bit;
58706d67d54SPaul Mackerras 	const char *name;
58806d67d54SPaul Mackerras } msr_bits[] = {
5893bfd0c9cSAnton Blanchard #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
5903bfd0c9cSAnton Blanchard 	{MSR_SF,	"SF"},
5913bfd0c9cSAnton Blanchard 	{MSR_HV,	"HV"},
5923bfd0c9cSAnton Blanchard #endif
5933bfd0c9cSAnton Blanchard 	{MSR_VEC,	"VEC"},
5943bfd0c9cSAnton Blanchard 	{MSR_VSX,	"VSX"},
5953bfd0c9cSAnton Blanchard #ifdef CONFIG_BOOKE
5963bfd0c9cSAnton Blanchard 	{MSR_CE,	"CE"},
5973bfd0c9cSAnton Blanchard #endif
59806d67d54SPaul Mackerras 	{MSR_EE,	"EE"},
59906d67d54SPaul Mackerras 	{MSR_PR,	"PR"},
60006d67d54SPaul Mackerras 	{MSR_FP,	"FP"},
60106d67d54SPaul Mackerras 	{MSR_ME,	"ME"},
6023bfd0c9cSAnton Blanchard #ifdef CONFIG_BOOKE
6031b98326bSKumar Gala 	{MSR_DE,	"DE"},
6043bfd0c9cSAnton Blanchard #else
6053bfd0c9cSAnton Blanchard 	{MSR_SE,	"SE"},
6063bfd0c9cSAnton Blanchard 	{MSR_BE,	"BE"},
6073bfd0c9cSAnton Blanchard #endif
60806d67d54SPaul Mackerras 	{MSR_IR,	"IR"},
60906d67d54SPaul Mackerras 	{MSR_DR,	"DR"},
6103bfd0c9cSAnton Blanchard 	{MSR_PMM,	"PMM"},
6113bfd0c9cSAnton Blanchard #ifndef CONFIG_BOOKE
6123bfd0c9cSAnton Blanchard 	{MSR_RI,	"RI"},
6133bfd0c9cSAnton Blanchard 	{MSR_LE,	"LE"},
6143bfd0c9cSAnton Blanchard #endif
61506d67d54SPaul Mackerras 	{0,		NULL}
61606d67d54SPaul Mackerras };
61706d67d54SPaul Mackerras 
61806d67d54SPaul Mackerras static void printbits(unsigned long val, struct regbit *bits)
61906d67d54SPaul Mackerras {
62006d67d54SPaul Mackerras 	const char *sep = "";
62106d67d54SPaul Mackerras 
62206d67d54SPaul Mackerras 	printk("<");
62306d67d54SPaul Mackerras 	for (; bits->bit; ++bits)
62406d67d54SPaul Mackerras 		if (val & bits->bit) {
62506d67d54SPaul Mackerras 			printk("%s%s", sep, bits->name);
62606d67d54SPaul Mackerras 			sep = ",";
62706d67d54SPaul Mackerras 		}
62806d67d54SPaul Mackerras 	printk(">");
62906d67d54SPaul Mackerras }
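/*
 * Example (illustrative): for a typical 64-bit kernel MSR value this
 * appends something like "<SF,EE,ME,IR,DR,RI>" to the "MSR:" line
 * printed by show_regs() below.
 */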
63006d67d54SPaul Mackerras 
63106d67d54SPaul Mackerras #ifdef CONFIG_PPC64
632f6f7dde3Santon@samba.org #define REG		"%016lx"
63306d67d54SPaul Mackerras #define REGS_PER_LINE	4
63406d67d54SPaul Mackerras #define LAST_VOLATILE	13
63506d67d54SPaul Mackerras #else
636f6f7dde3Santon@samba.org #define REG		"%08lx"
63706d67d54SPaul Mackerras #define REGS_PER_LINE	8
63806d67d54SPaul Mackerras #define LAST_VOLATILE	12
63906d67d54SPaul Mackerras #endif
64006d67d54SPaul Mackerras 
64114cf11afSPaul Mackerras void show_regs(struct pt_regs * regs)
64214cf11afSPaul Mackerras {
64314cf11afSPaul Mackerras 	int i, trap;
64414cf11afSPaul Mackerras 
64506d67d54SPaul Mackerras 	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
64606d67d54SPaul Mackerras 	       regs->nip, regs->link, regs->ctr);
64706d67d54SPaul Mackerras 	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
64896b644bdSSerge E. Hallyn 	       regs, regs->trap, print_tainted(), init_utsname()->release);
64906d67d54SPaul Mackerras 	printk("MSR: "REG" ", regs->msr);
65006d67d54SPaul Mackerras 	printbits(regs->msr, msr_bits);
651f6f7dde3Santon@samba.org 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
6527230c564SBenjamin Herrenschmidt #ifdef CONFIG_PPC64
6537230c564SBenjamin Herrenschmidt 	printk("SOFTE: %ld\n", regs->softe);
6547230c564SBenjamin Herrenschmidt #endif
65514cf11afSPaul Mackerras 	trap = TRAP(regs);
6565115a026SMichael Neuling 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
6575115a026SMichael Neuling 		printk("CFAR: "REG"\n", regs->orig_gpr3);
65814cf11afSPaul Mackerras 	if (trap == 0x300 || trap == 0x600)
659ba28c9aaSKumar Gala #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
66014170789SKumar Gala 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
66114170789SKumar Gala #else
6627071854bSAnton Blanchard 		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
66314170789SKumar Gala #endif
66406d67d54SPaul Mackerras 	printk("TASK = %p[%d] '%s' THREAD: %p",
66519c5870cSAlexey Dobriyan 	       current, task_pid_nr(current), current->comm, task_thread_info(current));
66614cf11afSPaul Mackerras 
66714cf11afSPaul Mackerras #ifdef CONFIG_SMP
66879ccd1beSHugh Dickins 	printk(" CPU: %d", raw_smp_processor_id());
66914cf11afSPaul Mackerras #endif /* CONFIG_SMP */
67014cf11afSPaul Mackerras 
67114cf11afSPaul Mackerras 	for (i = 0;  i < 32;  i++) {
67206d67d54SPaul Mackerras 		if ((i % REGS_PER_LINE) == 0)
673a2367194SKumar Gala 			printk("\nGPR%02d: ", i);
67406d67d54SPaul Mackerras 		printk(REG " ", regs->gpr[i]);
67506d67d54SPaul Mackerras 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
67614cf11afSPaul Mackerras 			break;
67714cf11afSPaul Mackerras 	}
67814cf11afSPaul Mackerras 	printk("\n");
67914cf11afSPaul Mackerras #ifdef CONFIG_KALLSYMS
68014cf11afSPaul Mackerras 	/*
68114cf11afSPaul Mackerras 	 * Look up NIP late so we have the best chance of getting the
68214cf11afSPaul Mackerras 	 * above info out without failing
68314cf11afSPaul Mackerras 	 */
684058c78f4SBenjamin Herrenschmidt 	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
685058c78f4SBenjamin Herrenschmidt 	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
68614cf11afSPaul Mackerras #endif
68714cf11afSPaul Mackerras 	show_stack(current, (unsigned long *) regs->gpr[1]);
68806d67d54SPaul Mackerras 	if (!user_mode(regs))
68906d67d54SPaul Mackerras 		show_instructions(regs);
69014cf11afSPaul Mackerras }
69114cf11afSPaul Mackerras 
69214cf11afSPaul Mackerras void exit_thread(void)
69314cf11afSPaul Mackerras {
69448abec07SPaul Mackerras 	discard_lazy_cpu_state();
69514cf11afSPaul Mackerras }
69614cf11afSPaul Mackerras 
69714cf11afSPaul Mackerras void flush_thread(void)
69814cf11afSPaul Mackerras {
69948abec07SPaul Mackerras 	discard_lazy_cpu_state();
70014cf11afSPaul Mackerras 
701e0780b72SK.Prasad #ifdef CONFIG_HAVE_HW_BREAKPOINT
7025aae8a53SK.Prasad 	flush_ptrace_hw_breakpoint(current);
703e0780b72SK.Prasad #else /* CONFIG_HAVE_HW_BREAKPOINT */
7043bffb652SDave Kleikamp 	set_debug_reg_defaults(&current->thread);
705e0780b72SK.Prasad #endif /* CONFIG_HAVE_HW_BREAKPOINT */
70614cf11afSPaul Mackerras }
70714cf11afSPaul Mackerras 
70814cf11afSPaul Mackerras void
70914cf11afSPaul Mackerras release_thread(struct task_struct *t)
71014cf11afSPaul Mackerras {
71114cf11afSPaul Mackerras }
71214cf11afSPaul Mackerras 
71314cf11afSPaul Mackerras /*
71455ccf3feSSuresh Siddha  * this gets called so that we can store coprocessor state into memory and
71555ccf3feSSuresh Siddha  * copy the current task into the new thread.
71614cf11afSPaul Mackerras  */
71755ccf3feSSuresh Siddha int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
71814cf11afSPaul Mackerras {
71955ccf3feSSuresh Siddha 	flush_fp_to_thread(src);
72055ccf3feSSuresh Siddha 	flush_altivec_to_thread(src);
72155ccf3feSSuresh Siddha 	flush_vsx_to_thread(src);
72255ccf3feSSuresh Siddha 	flush_spe_to_thread(src);
7235aae8a53SK.Prasad #ifdef CONFIG_HAVE_HW_BREAKPOINT
72455ccf3feSSuresh Siddha 	flush_ptrace_hw_breakpoint(src);
7255aae8a53SK.Prasad #endif /* CONFIG_HAVE_HW_BREAKPOINT */
72655ccf3feSSuresh Siddha 
72755ccf3feSSuresh Siddha 	*dst = *src;
72855ccf3feSSuresh Siddha 	return 0;
72914cf11afSPaul Mackerras }
73014cf11afSPaul Mackerras 
73114cf11afSPaul Mackerras /*
73214cf11afSPaul Mackerras  * Copy a thread..
73314cf11afSPaul Mackerras  */
734efcac658SAlexey Kardashevskiy extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
735efcac658SAlexey Kardashevskiy 
7366f2c55b8SAlexey Dobriyan int copy_thread(unsigned long clone_flags, unsigned long usp,
737*58254e10SAl Viro 		unsigned long arg, struct task_struct *p,
73806d67d54SPaul Mackerras 		struct pt_regs *regs)
73914cf11afSPaul Mackerras {
74014cf11afSPaul Mackerras 	struct pt_regs *childregs, *kregs;
74114cf11afSPaul Mackerras 	extern void ret_from_fork(void);
742*58254e10SAl Viro 	extern void ret_from_kernel_thread(void);
743*58254e10SAl Viro 	void (*f)(void);
7440cec6fd1SAl Viro 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
74514cf11afSPaul Mackerras 
74614cf11afSPaul Mackerras 	/* Copy registers */
74714cf11afSPaul Mackerras 	sp -= sizeof(struct pt_regs);
74814cf11afSPaul Mackerras 	childregs = (struct pt_regs *) sp;
749*58254e10SAl Viro 	if (!regs) {
75014cf11afSPaul Mackerras 		/* for kernel thread, set `current' and stackptr in new task */
751*58254e10SAl Viro 		memset(childregs, 0, sizeof(struct pt_regs));
75214cf11afSPaul Mackerras 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
753*58254e10SAl Viro #ifdef CONFIG_PPC64
754*58254e10SAl Viro 		childregs->gpr[14] = *(unsigned long *)usp;
755*58254e10SAl Viro 		childregs->gpr[2] = ((unsigned long *)usp)[1],
756b5e2fc1cSAl Viro 		clear_tsk_thread_flag(p, TIF_32BIT);
757*58254e10SAl Viro #else
758*58254e10SAl Viro 		childregs->gpr[14] = usp;	/* function */
759*58254e10SAl Viro 		childregs->gpr[2] = (unsigned long) p;
76006d67d54SPaul Mackerras #endif
761*58254e10SAl Viro 		childregs->gpr[15] = arg;
76214cf11afSPaul Mackerras 		p->thread.regs = NULL;	/* no user register state */
763*58254e10SAl Viro 		f = ret_from_kernel_thread;
76414cf11afSPaul Mackerras 	} else {
765*58254e10SAl Viro 		CHECK_FULL_REGS(regs);
766*58254e10SAl Viro 		*childregs = *regs;
76714cf11afSPaul Mackerras 		childregs->gpr[1] = usp;
76814cf11afSPaul Mackerras 		p->thread.regs = childregs;
769*58254e10SAl Viro 		childregs->gpr[3] = 0;  /* Result from fork() */
77006d67d54SPaul Mackerras 		if (clone_flags & CLONE_SETTLS) {
77106d67d54SPaul Mackerras #ifdef CONFIG_PPC64
7729904b005SDenis Kirjanov 			if (!is_32bit_task())
77306d67d54SPaul Mackerras 				childregs->gpr[13] = childregs->gpr[6];
77406d67d54SPaul Mackerras 			else
77506d67d54SPaul Mackerras #endif
77614cf11afSPaul Mackerras 				childregs->gpr[2] = childregs->gpr[6];
77714cf11afSPaul Mackerras 		}
778*58254e10SAl Viro 
779*58254e10SAl Viro 		f = ret_from_fork;
78006d67d54SPaul Mackerras 	}
78114cf11afSPaul Mackerras 	sp -= STACK_FRAME_OVERHEAD;
78214cf11afSPaul Mackerras 
78314cf11afSPaul Mackerras 	/*
78414cf11afSPaul Mackerras 	 * The way this works is that at some point in the future
78514cf11afSPaul Mackerras 	 * some task will call _switch to switch to the new task.
78614cf11afSPaul Mackerras 	 * That will pop off the stack frame created below and start
78714cf11afSPaul Mackerras 	 * the new task running at ret_from_fork.  The new task will
78814cf11afSPaul Mackerras 	 * do some house keeping and then return from the fork or clone
78914cf11afSPaul Mackerras 	 * system call, using the stack frame created above.
79014cf11afSPaul Mackerras 	 */
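	/*
	 * The resulting layout at the top of the new task's kernel
	 * stack is therefore (sketch, highest address first):
	 *
	 *	task_stack_page(p) + THREAD_SIZE
	 *	  struct pt_regs	<- childregs
	 *	  STACK_FRAME_OVERHEAD
	 *	  struct pt_regs	<- kregs (kregs->nip = f)
	 *	  STACK_FRAME_OVERHEAD	<- p->thread.ksp
	 */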
79114cf11afSPaul Mackerras 	sp -= sizeof(struct pt_regs);
79214cf11afSPaul Mackerras 	kregs = (struct pt_regs *) sp;
79314cf11afSPaul Mackerras 	sp -= STACK_FRAME_OVERHEAD;
79414cf11afSPaul Mackerras 	p->thread.ksp = sp;
79585218827SKumar Gala 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
79685218827SKumar Gala 				_ALIGN_UP(sizeof(struct thread_info), 16);
79714cf11afSPaul Mackerras 
79894491685SBenjamin Herrenschmidt #ifdef CONFIG_PPC_STD_MMU_64
79944ae3ab3SMatt Evans 	if (mmu_has_feature(MMU_FTR_SLB)) {
8001189be65SPaul Mackerras 		unsigned long sp_vsid;
8013c726f8dSBenjamin Herrenschmidt 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
80206d67d54SPaul Mackerras 
80344ae3ab3SMatt Evans 		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
8041189be65SPaul Mackerras 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
8051189be65SPaul Mackerras 				<< SLB_VSID_SHIFT_1T;
8061189be65SPaul Mackerras 		else
8071189be65SPaul Mackerras 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
8081189be65SPaul Mackerras 				<< SLB_VSID_SHIFT;
8093c726f8dSBenjamin Herrenschmidt 		sp_vsid |= SLB_VSID_KERNEL | llp;
81006d67d54SPaul Mackerras 		p->thread.ksp_vsid = sp_vsid;
81106d67d54SPaul Mackerras 	}
812747bea91SBenjamin Herrenschmidt #endif /* CONFIG_PPC_STD_MMU_64 */
813efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64
814efcac658SAlexey Kardashevskiy 	if (cpu_has_feature(CPU_FTR_DSCR)) {
8151021cb26SAnton Blanchard 		p->thread.dscr_inherit = current->thread.dscr_inherit;
816efcac658SAlexey Kardashevskiy 		p->thread.dscr = current->thread.dscr;
817efcac658SAlexey Kardashevskiy 	}
818efcac658SAlexey Kardashevskiy #endif
81906d67d54SPaul Mackerras 	/*
82006d67d54SPaul Mackerras 	 * The PPC64 ABI makes use of a TOC to contain function
82106d67d54SPaul Mackerras 	 * pointers.  The function f (ret_from_fork or ret_from_kernel_thread)
82206d67d54SPaul Mackerras 	 * is actually a pointer to its function descriptor; the first entry
82306d67d54SPaul Mackerras 	 * of the descriptor is the address of the actual function.
82406d67d54SPaul Mackerras 	 */
825747bea91SBenjamin Herrenschmidt #ifdef CONFIG_PPC64
826*58254e10SAl Viro 	kregs->nip = *((unsigned long *)f);
82706d67d54SPaul Mackerras #else
828*58254e10SAl Viro 	kregs->nip = (unsigned long)f;
82906d67d54SPaul Mackerras #endif
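	/*
	 * Background (64-bit ELFv1 ABI, stated here as a sketch rather
	 * than anything defined in this file): a 64-bit function symbol
	 * such as ret_from_fork names a three-doubleword descriptor of
	 * roughly the form { entry address, TOC (r2) value, environment
	 * pointer }, so dereferencing f above yields the real entry
	 * address, while the 32-bit case uses the symbol address
	 * directly.
	 */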
83014cf11afSPaul Mackerras 	return 0;
83114cf11afSPaul Mackerras }
83214cf11afSPaul Mackerras 
83314cf11afSPaul Mackerras /*
83414cf11afSPaul Mackerras  * Set up a thread for executing a new program
83514cf11afSPaul Mackerras  */
83606d67d54SPaul Mackerras void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
83714cf11afSPaul Mackerras {
83890eac727SMichael Ellerman #ifdef CONFIG_PPC64
83990eac727SMichael Ellerman 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
84090eac727SMichael Ellerman #endif
84190eac727SMichael Ellerman 
84206d67d54SPaul Mackerras 	/*
84306d67d54SPaul Mackerras 	 * If we exec out of a kernel thread then thread.regs will not be
84406d67d54SPaul Mackerras 	 * set.  Do it now.
84506d67d54SPaul Mackerras 	 */
84606d67d54SPaul Mackerras 	if (!current->thread.regs) {
8470cec6fd1SAl Viro 		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
8480cec6fd1SAl Viro 		current->thread.regs = regs - 1;
84906d67d54SPaul Mackerras 	}
85006d67d54SPaul Mackerras 
85114cf11afSPaul Mackerras 	memset(regs->gpr, 0, sizeof(regs->gpr));
85214cf11afSPaul Mackerras 	regs->ctr = 0;
85314cf11afSPaul Mackerras 	regs->link = 0;
85414cf11afSPaul Mackerras 	regs->xer = 0;
85514cf11afSPaul Mackerras 	regs->ccr = 0;
85614cf11afSPaul Mackerras 	regs->gpr[1] = sp;
85706d67d54SPaul Mackerras 
858474f8196SRoland McGrath 	/*
859474f8196SRoland McGrath 	 * We have just cleared all the nonvolatile GPRs, so make
860474f8196SRoland McGrath 	 * FULL_REGS(regs) return true.  This is necessary to allow
861474f8196SRoland McGrath 	 * ptrace to examine the thread immediately after exec.
862474f8196SRoland McGrath 	 */
863474f8196SRoland McGrath 	regs->trap &= ~1UL;
864474f8196SRoland McGrath 
86506d67d54SPaul Mackerras #ifdef CONFIG_PPC32
86606d67d54SPaul Mackerras 	regs->mq = 0;
86706d67d54SPaul Mackerras 	regs->nip = start;
86814cf11afSPaul Mackerras 	regs->msr = MSR_USER;
86906d67d54SPaul Mackerras #else
8709904b005SDenis Kirjanov 	if (!is_32bit_task()) {
87190eac727SMichael Ellerman 		unsigned long entry, toc;
87206d67d54SPaul Mackerras 
87306d67d54SPaul Mackerras 		/* start is a relocated pointer to the function descriptor for
87406d67d54SPaul Mackerras 		 * the elf _start routine.  The first entry in the function
87506d67d54SPaul Mackerras 		 * descriptor is the entry address of _start and the second
87606d67d54SPaul Mackerras 		 * entry is the TOC value we need to use.
87706d67d54SPaul Mackerras 		 */
87806d67d54SPaul Mackerras 		__get_user(entry, (unsigned long __user *)start);
87906d67d54SPaul Mackerras 		__get_user(toc, (unsigned long __user *)start+1);
88006d67d54SPaul Mackerras 
88106d67d54SPaul Mackerras 		/* Check whether the e_entry function descriptor entries
88206d67d54SPaul Mackerras 		 * need to be relocated before we can use them.
88306d67d54SPaul Mackerras 		 */
88406d67d54SPaul Mackerras 		if (load_addr != 0) {
88506d67d54SPaul Mackerras 			entry += load_addr;
88606d67d54SPaul Mackerras 			toc   += load_addr;
88706d67d54SPaul Mackerras 		}
88806d67d54SPaul Mackerras 		regs->nip = entry;
88906d67d54SPaul Mackerras 		regs->gpr[2] = toc;
89006d67d54SPaul Mackerras 		regs->msr = MSR_USER64;
891d4bf9a78SStephen Rothwell 	} else {
892d4bf9a78SStephen Rothwell 		regs->nip = start;
893d4bf9a78SStephen Rothwell 		regs->gpr[2] = 0;
894d4bf9a78SStephen Rothwell 		regs->msr = MSR_USER32;
89506d67d54SPaul Mackerras 	}
89606d67d54SPaul Mackerras #endif
89706d67d54SPaul Mackerras 
89848abec07SPaul Mackerras 	discard_lazy_cpu_state();
899ce48b210SMichael Neuling #ifdef CONFIG_VSX
900ce48b210SMichael Neuling 	current->thread.used_vsr = 0;
901ce48b210SMichael Neuling #endif
90214cf11afSPaul Mackerras 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
90325c8a78bSDavid Gibson 	current->thread.fpscr.val = 0;
90414cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC
90514cf11afSPaul Mackerras 	memset(current->thread.vr, 0, sizeof(current->thread.vr));
90614cf11afSPaul Mackerras 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
90706d67d54SPaul Mackerras 	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
90814cf11afSPaul Mackerras 	current->thread.vrsave = 0;
90914cf11afSPaul Mackerras 	current->thread.used_vr = 0;
91014cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */
91114cf11afSPaul Mackerras #ifdef CONFIG_SPE
91214cf11afSPaul Mackerras 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
91314cf11afSPaul Mackerras 	current->thread.acc = 0;
91414cf11afSPaul Mackerras 	current->thread.spefscr = 0;
91514cf11afSPaul Mackerras 	current->thread.used_spe = 0;
91614cf11afSPaul Mackerras #endif /* CONFIG_SPE */
91714cf11afSPaul Mackerras }
91814cf11afSPaul Mackerras 
91914cf11afSPaul Mackerras #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
92014cf11afSPaul Mackerras 		| PR_FP_EXC_RES | PR_FP_EXC_INV)
92114cf11afSPaul Mackerras 
92214cf11afSPaul Mackerras int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
92314cf11afSPaul Mackerras {
92414cf11afSPaul Mackerras 	struct pt_regs *regs = tsk->thread.regs;
92514cf11afSPaul Mackerras 
92614cf11afSPaul Mackerras 	/* This is a bit hairy.  If we are an SPE-enabled processor
92714cf11afSPaul Mackerras 	 * (have embedded fp) we store the IEEE exception enable flags in
92814cf11afSPaul Mackerras 	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
92914cf11afSPaul Mackerras 	 * mode (async, precise, disabled) for 'Classic' FP. */
93014cf11afSPaul Mackerras 	if (val & PR_FP_EXC_SW_ENABLE) {
93114cf11afSPaul Mackerras #ifdef CONFIG_SPE
9325e14d21eSKumar Gala 		if (cpu_has_feature(CPU_FTR_SPE)) {
93314cf11afSPaul Mackerras 			tsk->thread.fpexc_mode = val &
93414cf11afSPaul Mackerras 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
93506d67d54SPaul Mackerras 			return 0;
9365e14d21eSKumar Gala 		} else {
9375e14d21eSKumar Gala 			return -EINVAL;
9385e14d21eSKumar Gala 		}
93914cf11afSPaul Mackerras #else
94014cf11afSPaul Mackerras 		return -EINVAL;
94114cf11afSPaul Mackerras #endif
94206d67d54SPaul Mackerras 	}
94306d67d54SPaul Mackerras 
94414cf11afSPaul Mackerras 	/* On a CONFIG_SPE implementation this does not hurt us.  The bits that
94514cf11afSPaul Mackerras 	 * __pack_fe01 use do not overlap with bits used for
94614cf11afSPaul Mackerras 	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
94714cf11afSPaul Mackerras 	 * on CONFIG_SPE implementations are reserved so writing to
94814cf11afSPaul Mackerras 	 * them does not change anything */
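	/*
	 * For reference, the architected MSR[FE0,FE1] encoding that
	 * __pack_fe01()/__unpack_fe01() translate to and from:
	 *
	 *	FE0 FE1
	 *	 0   0	floating-point exceptions disabled
	 *	 0   1	imprecise nonrecoverable
	 *	 1   0	imprecise recoverable
	 *	 1   1	precise
	 */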
94914cf11afSPaul Mackerras 	if (val > PR_FP_EXC_PRECISE)
95014cf11afSPaul Mackerras 		return -EINVAL;
95114cf11afSPaul Mackerras 	tsk->thread.fpexc_mode = __pack_fe01(val);
95214cf11afSPaul Mackerras 	if (regs != NULL && (regs->msr & MSR_FP) != 0)
95314cf11afSPaul Mackerras 		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
95414cf11afSPaul Mackerras 			| tsk->thread.fpexc_mode;
95514cf11afSPaul Mackerras 	return 0;
95614cf11afSPaul Mackerras }
95714cf11afSPaul Mackerras 
95814cf11afSPaul Mackerras int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
95914cf11afSPaul Mackerras {
96014cf11afSPaul Mackerras 	unsigned int val;
96114cf11afSPaul Mackerras 
96214cf11afSPaul Mackerras 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
96314cf11afSPaul Mackerras #ifdef CONFIG_SPE
9645e14d21eSKumar Gala 		if (cpu_has_feature(CPU_FTR_SPE))
96514cf11afSPaul Mackerras 			val = tsk->thread.fpexc_mode;
9665e14d21eSKumar Gala 		else
9675e14d21eSKumar Gala 			return -EINVAL;
96814cf11afSPaul Mackerras #else
96914cf11afSPaul Mackerras 		return -EINVAL;
97014cf11afSPaul Mackerras #endif
97114cf11afSPaul Mackerras 	else
97214cf11afSPaul Mackerras 		val = __unpack_fe01(tsk->thread.fpexc_mode);
97314cf11afSPaul Mackerras 	return put_user(val, (unsigned int __user *) adr);
97414cf11afSPaul Mackerras }
97514cf11afSPaul Mackerras 
976fab5db97SPaul Mackerras int set_endian(struct task_struct *tsk, unsigned int val)
977fab5db97SPaul Mackerras {
978fab5db97SPaul Mackerras 	struct pt_regs *regs = tsk->thread.regs;
979fab5db97SPaul Mackerras 
980fab5db97SPaul Mackerras 	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
981fab5db97SPaul Mackerras 	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
982fab5db97SPaul Mackerras 		return -EINVAL;
983fab5db97SPaul Mackerras 
984fab5db97SPaul Mackerras 	if (regs == NULL)
985fab5db97SPaul Mackerras 		return -EINVAL;
986fab5db97SPaul Mackerras 
987fab5db97SPaul Mackerras 	if (val == PR_ENDIAN_BIG)
988fab5db97SPaul Mackerras 		regs->msr &= ~MSR_LE;
989fab5db97SPaul Mackerras 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
990fab5db97SPaul Mackerras 		regs->msr |= MSR_LE;
991fab5db97SPaul Mackerras 	else
992fab5db97SPaul Mackerras 		return -EINVAL;
993fab5db97SPaul Mackerras 
994fab5db97SPaul Mackerras 	return 0;
995fab5db97SPaul Mackerras }
996fab5db97SPaul Mackerras 
997fab5db97SPaul Mackerras int get_endian(struct task_struct *tsk, unsigned long adr)
998fab5db97SPaul Mackerras {
999fab5db97SPaul Mackerras 	struct pt_regs *regs = tsk->thread.regs;
1000fab5db97SPaul Mackerras 	unsigned int val;
1001fab5db97SPaul Mackerras 
1002fab5db97SPaul Mackerras 	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1003fab5db97SPaul Mackerras 	    !cpu_has_feature(CPU_FTR_REAL_LE))
1004fab5db97SPaul Mackerras 		return -EINVAL;
1005fab5db97SPaul Mackerras 
1006fab5db97SPaul Mackerras 	if (regs == NULL)
1007fab5db97SPaul Mackerras 		return -EINVAL;
1008fab5db97SPaul Mackerras 
1009fab5db97SPaul Mackerras 	if (regs->msr & MSR_LE) {
1010fab5db97SPaul Mackerras 		if (cpu_has_feature(CPU_FTR_REAL_LE))
1011fab5db97SPaul Mackerras 			val = PR_ENDIAN_LITTLE;
1012fab5db97SPaul Mackerras 		else
1013fab5db97SPaul Mackerras 			val = PR_ENDIAN_PPC_LITTLE;
1014fab5db97SPaul Mackerras 	} else
1015fab5db97SPaul Mackerras 		val = PR_ENDIAN_BIG;
1016fab5db97SPaul Mackerras 
1017fab5db97SPaul Mackerras 	return put_user(val, (unsigned int __user *)adr);
1018fab5db97SPaul Mackerras }
1019fab5db97SPaul Mackerras 
1020e9370ae1SPaul Mackerras int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1021e9370ae1SPaul Mackerras {
1022e9370ae1SPaul Mackerras 	tsk->thread.align_ctl = val;
1023e9370ae1SPaul Mackerras 	return 0;
1024e9370ae1SPaul Mackerras }
1025e9370ae1SPaul Mackerras 
1026e9370ae1SPaul Mackerras int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1027e9370ae1SPaul Mackerras {
1028e9370ae1SPaul Mackerras 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1029e9370ae1SPaul Mackerras }
1030e9370ae1SPaul Mackerras 
103106d67d54SPaul Mackerras #define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))
103206d67d54SPaul Mackerras 
103314cf11afSPaul Mackerras int sys_clone(unsigned long clone_flags, unsigned long usp,
103414cf11afSPaul Mackerras 	      int __user *parent_tidp, void __user *child_threadptr,
103514cf11afSPaul Mackerras 	      int __user *child_tidp, int p6,
103614cf11afSPaul Mackerras 	      struct pt_regs *regs)
103714cf11afSPaul Mackerras {
103814cf11afSPaul Mackerras 	CHECK_FULL_REGS(regs);
103914cf11afSPaul Mackerras 	if (usp == 0)
104014cf11afSPaul Mackerras 		usp = regs->gpr[1];	/* stack pointer for child */
104106d67d54SPaul Mackerras #ifdef CONFIG_PPC64
10429904b005SDenis Kirjanov 	if (is_32bit_task()) {
104306d67d54SPaul Mackerras 		parent_tidp = TRUNC_PTR(parent_tidp);
104406d67d54SPaul Mackerras 		child_tidp = TRUNC_PTR(child_tidp);
104506d67d54SPaul Mackerras 	}
104606d67d54SPaul Mackerras #endif
104714cf11afSPaul Mackerras  	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
104814cf11afSPaul Mackerras }
104914cf11afSPaul Mackerras 
105014cf11afSPaul Mackerras int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
105114cf11afSPaul Mackerras 	     unsigned long p4, unsigned long p5, unsigned long p6,
105214cf11afSPaul Mackerras 	     struct pt_regs *regs)
105314cf11afSPaul Mackerras {
105414cf11afSPaul Mackerras 	CHECK_FULL_REGS(regs);
105514cf11afSPaul Mackerras 	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
105614cf11afSPaul Mackerras }
105714cf11afSPaul Mackerras 
105814cf11afSPaul Mackerras int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
105914cf11afSPaul Mackerras 	      unsigned long p4, unsigned long p5, unsigned long p6,
106014cf11afSPaul Mackerras 	      struct pt_regs *regs)
106114cf11afSPaul Mackerras {
106214cf11afSPaul Mackerras 	CHECK_FULL_REGS(regs);
106314cf11afSPaul Mackerras 	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
106414cf11afSPaul Mackerras 			regs, 0, NULL, NULL);
106514cf11afSPaul Mackerras }
106614cf11afSPaul Mackerras 
106714cf11afSPaul Mackerras int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
106814cf11afSPaul Mackerras 	       unsigned long a3, unsigned long a4, unsigned long a5,
106914cf11afSPaul Mackerras 	       struct pt_regs *regs)
107014cf11afSPaul Mackerras {
107114cf11afSPaul Mackerras 	int error;
107214cf11afSPaul Mackerras 	char *filename;
107314cf11afSPaul Mackerras 
1074c7887325SDavid Howells 	filename = getname((const char __user *) a0);
107514cf11afSPaul Mackerras 	error = PTR_ERR(filename);
107614cf11afSPaul Mackerras 	if (IS_ERR(filename))
107714cf11afSPaul Mackerras 		goto out;
107814cf11afSPaul Mackerras 	flush_fp_to_thread(current);
107914cf11afSPaul Mackerras 	flush_altivec_to_thread(current);
108014cf11afSPaul Mackerras 	flush_spe_to_thread(current);
1081d7627467SDavid Howells 	error = do_execve(filename,
1082d7627467SDavid Howells 			  (const char __user *const __user *) a1,
1083d7627467SDavid Howells 			  (const char __user *const __user *) a2, regs);
108414cf11afSPaul Mackerras 	putname(filename);
108514cf11afSPaul Mackerras out:
108614cf11afSPaul Mackerras 	return error;
108714cf11afSPaul Mackerras }
108814cf11afSPaul Mackerras 
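/*
 * Return 1 if sp points into this CPU's hard or soft IRQ stack with at
 * least nbytes of frame space left before the top of that stack.
 */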
1089bb72c481SPaul Mackerras static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1090bb72c481SPaul Mackerras 				  unsigned long nbytes)
1091bb72c481SPaul Mackerras {
1092bb72c481SPaul Mackerras 	unsigned long stack_page;
1093bb72c481SPaul Mackerras 	unsigned long cpu = task_cpu(p);
1094bb72c481SPaul Mackerras 
1095bb72c481SPaul Mackerras 	/*
1096bb72c481SPaul Mackerras 	 * Avoid crashing if the stack has overflowed and corrupted
1097bb72c481SPaul Mackerras 	 * task_cpu(p), which is in the thread_info struct.
1098bb72c481SPaul Mackerras 	 */
1099bb72c481SPaul Mackerras 	if (cpu < NR_CPUS && cpu_possible(cpu)) {
1100bb72c481SPaul Mackerras 		stack_page = (unsigned long) hardirq_ctx[cpu];
1101bb72c481SPaul Mackerras 		if (sp >= stack_page + sizeof(struct thread_struct)
1102bb72c481SPaul Mackerras 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1103bb72c481SPaul Mackerras 			return 1;
1104bb72c481SPaul Mackerras 
1105bb72c481SPaul Mackerras 		stack_page = (unsigned long) softirq_ctx[cpu];
1106bb72c481SPaul Mackerras 		if (sp >= stack_page + sizeof(struct thread_struct)
1107bb72c481SPaul Mackerras 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1108bb72c481SPaul Mackerras 			return 1;
1109bb72c481SPaul Mackerras 	}
1110bb72c481SPaul Mackerras 	return 0;
1111bb72c481SPaul Mackerras }
1112bb72c481SPaul Mackerras 
11132f25194dSAnton Blanchard int validate_sp(unsigned long sp, struct task_struct *p,
111414cf11afSPaul Mackerras 		unsigned long nbytes)
111514cf11afSPaul Mackerras {
11160cec6fd1SAl Viro 	unsigned long stack_page = (unsigned long)task_stack_page(p);
111714cf11afSPaul Mackerras 
111814cf11afSPaul Mackerras 	if (sp >= stack_page + sizeof(struct thread_struct)
111914cf11afSPaul Mackerras 	    && sp <= stack_page + THREAD_SIZE - nbytes)
112014cf11afSPaul Mackerras 		return 1;
112114cf11afSPaul Mackerras 
1122bb72c481SPaul Mackerras 	return valid_irq_stack(sp, p, nbytes);
112314cf11afSPaul Mackerras }
112414cf11afSPaul Mackerras 
11252f25194dSAnton Blanchard EXPORT_SYMBOL(validate_sp);
11262f25194dSAnton Blanchard 
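/*
 * get_wchan: find out where a sleeping task is blocked.  Each stack
 * frame's first word is the back chain pointer to the caller's frame,
 * and the saved return address sits STACK_FRAME_LR_SAVE words into a
 * frame; walk up to 16 frames and return the first saved return
 * address that is not inside the scheduler itself.
 */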
112706d67d54SPaul Mackerras unsigned long get_wchan(struct task_struct *p)
112806d67d54SPaul Mackerras {
112906d67d54SPaul Mackerras 	unsigned long ip, sp;
113006d67d54SPaul Mackerras 	int count = 0;
113106d67d54SPaul Mackerras 
113206d67d54SPaul Mackerras 	if (!p || p == current || p->state == TASK_RUNNING)
113306d67d54SPaul Mackerras 		return 0;
113406d67d54SPaul Mackerras 
113506d67d54SPaul Mackerras 	sp = p->thread.ksp;
1136ec2b36b9SBenjamin Herrenschmidt 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
113706d67d54SPaul Mackerras 		return 0;
113806d67d54SPaul Mackerras 
113906d67d54SPaul Mackerras 	do {
114006d67d54SPaul Mackerras 		sp = *(unsigned long *)sp;
1141ec2b36b9SBenjamin Herrenschmidt 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
114206d67d54SPaul Mackerras 			return 0;
114306d67d54SPaul Mackerras 		if (count > 0) {
1144ec2b36b9SBenjamin Herrenschmidt 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
114506d67d54SPaul Mackerras 			if (!in_sched_functions(ip))
114606d67d54SPaul Mackerras 				return ip;
114706d67d54SPaul Mackerras 		}
114806d67d54SPaul Mackerras 	} while (count++ < 16);
114906d67d54SPaul Mackerras 	return 0;
115006d67d54SPaul Mackerras }
115106d67d54SPaul Mackerras 
1152c4d04be1SJohannes Berg static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
115314cf11afSPaul Mackerras 
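/*
 * Print a kernel backtrace for tsk.  The back chain is walked one
 * frame at a time and each frame's saved LR is printed with %pS.  The
 * very first frame is tagged "(unreliable)" because its LR slot may
 * not have been written yet, and a frame carrying the "regshere"
 * marker (STACK_FRAME_REGS_MARKER) is decoded as an exception frame so
 * the trap number, NIP and LR from the embedded pt_regs are shown as
 * well.  With CONFIG_FUNCTION_GRAPH_TRACER, addresses the graph tracer
 * replaced with return_to_handler are mapped back to the real return
 * address recorded in current->ret_stack.
 */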
115414cf11afSPaul Mackerras void show_stack(struct task_struct *tsk, unsigned long *stack)
115514cf11afSPaul Mackerras {
115606d67d54SPaul Mackerras 	unsigned long sp, ip, lr, newsp;
115714cf11afSPaul Mackerras 	int count = 0;
115806d67d54SPaul Mackerras 	int firstframe = 1;
11596794c782SSteven Rostedt #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11606794c782SSteven Rostedt 	int curr_frame = current->curr_ret_stack;
11616794c782SSteven Rostedt 	extern void return_to_handler(void);
11629135c3ccSSteven Rostedt 	unsigned long rth = (unsigned long)return_to_handler;
11639135c3ccSSteven Rostedt 	unsigned long mrth = -1;
11646794c782SSteven Rostedt #ifdef CONFIG_PPC64
11659135c3ccSSteven Rostedt 	extern void mod_return_to_handler(void);
11669135c3ccSSteven Rostedt 	rth = *(unsigned long *)rth;
11679135c3ccSSteven Rostedt 	mrth = (unsigned long)mod_return_to_handler;
11689135c3ccSSteven Rostedt 	mrth = *(unsigned long *)mrth;
11696794c782SSteven Rostedt #endif
11706794c782SSteven Rostedt #endif
117114cf11afSPaul Mackerras 
117214cf11afSPaul Mackerras 	sp = (unsigned long) stack;
117314cf11afSPaul Mackerras 	if (tsk == NULL)
117414cf11afSPaul Mackerras 		tsk = current;
117514cf11afSPaul Mackerras 	if (sp == 0) {
117614cf11afSPaul Mackerras 		if (tsk == current)
117714cf11afSPaul Mackerras 			asm("mr %0,1" : "=r" (sp));
117814cf11afSPaul Mackerras 		else
117914cf11afSPaul Mackerras 			sp = tsk->thread.ksp;
118014cf11afSPaul Mackerras 	}
118114cf11afSPaul Mackerras 
118206d67d54SPaul Mackerras 	lr = 0;
118306d67d54SPaul Mackerras 	printk("Call Trace:\n");
118414cf11afSPaul Mackerras 	do {
1185ec2b36b9SBenjamin Herrenschmidt 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
118606d67d54SPaul Mackerras 			return;
118706d67d54SPaul Mackerras 
118806d67d54SPaul Mackerras 		stack = (unsigned long *) sp;
118906d67d54SPaul Mackerras 		newsp = stack[0];
1190ec2b36b9SBenjamin Herrenschmidt 		ip = stack[STACK_FRAME_LR_SAVE];
119106d67d54SPaul Mackerras 		if (!firstframe || ip != lr) {
1192058c78f4SBenjamin Herrenschmidt 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
11936794c782SSteven Rostedt #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11949135c3ccSSteven Rostedt 			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
11956794c782SSteven Rostedt 				printk(" (%pS)",
11966794c782SSteven Rostedt 				       (void *)current->ret_stack[curr_frame].ret);
11976794c782SSteven Rostedt 				curr_frame--;
11986794c782SSteven Rostedt 			}
11996794c782SSteven Rostedt #endif
120006d67d54SPaul Mackerras 			if (firstframe)
120106d67d54SPaul Mackerras 				printk(" (unreliable)");
120206d67d54SPaul Mackerras 			printk("\n");
120314cf11afSPaul Mackerras 		}
120406d67d54SPaul Mackerras 		firstframe = 0;
120506d67d54SPaul Mackerras 
120606d67d54SPaul Mackerras 		/*
120706d67d54SPaul Mackerras 		 * See if this is an exception frame.
120806d67d54SPaul Mackerras 		 * We look for the "regshere" marker in the current frame.
120906d67d54SPaul Mackerras 		 */
1210ec2b36b9SBenjamin Herrenschmidt 		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1211ec2b36b9SBenjamin Herrenschmidt 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
121206d67d54SPaul Mackerras 			struct pt_regs *regs = (struct pt_regs *)
121306d67d54SPaul Mackerras 				(sp + STACK_FRAME_OVERHEAD);
121406d67d54SPaul Mackerras 			lr = regs->link;
1215058c78f4SBenjamin Herrenschmidt 			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
1216058c78f4SBenjamin Herrenschmidt 			       regs->trap, (void *)regs->nip, (void *)lr);
121706d67d54SPaul Mackerras 			firstframe = 1;
121814cf11afSPaul Mackerras 		}
121906d67d54SPaul Mackerras 
122006d67d54SPaul Mackerras 		sp = newsp;
122106d67d54SPaul Mackerras 	} while (count++ < kstack_depth_to_print);
122206d67d54SPaul Mackerras }
122306d67d54SPaul Mackerras 
122406d67d54SPaul Mackerras void dump_stack(void)
122506d67d54SPaul Mackerras {
122606d67d54SPaul Mackerras 	show_stack(current, NULL);
122706d67d54SPaul Mackerras }
122806d67d54SPaul Mackerras EXPORT_SYMBOL(dump_stack);
1229cb2c9b27SAnton Blanchard 
1230cb2c9b27SAnton Blanchard #ifdef CONFIG_PPC64
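/*
 * The runlatch is the RUN bit in the CTRL SPR.  It is set while a
 * thread is doing useful work and cleared in the idle loop, so hardware
 * and firmware can tell busy threads from idle ones (e.g. for PURR
 * accounting).  _TLF_RUNLATCH in thread_info caches the current setting
 * so the ppc64_runlatch_on()/ppc64_runlatch_off() wrappers in
 * <asm/runlatch.h> only drop into these slow paths when the state
 * actually changes.
 */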
1231fe1952fcSBenjamin Herrenschmidt /* Called with hard IRQs off */
1232fe1952fcSBenjamin Herrenschmidt void __ppc64_runlatch_on(void)
1233cb2c9b27SAnton Blanchard {
1234fe1952fcSBenjamin Herrenschmidt 	struct thread_info *ti = current_thread_info();
1235cb2c9b27SAnton Blanchard 	unsigned long ctrl;
1236cb2c9b27SAnton Blanchard 
1237cb2c9b27SAnton Blanchard 	ctrl = mfspr(SPRN_CTRLF);
1238cb2c9b27SAnton Blanchard 	ctrl |= CTRL_RUNLATCH;
1239cb2c9b27SAnton Blanchard 	mtspr(SPRN_CTRLT, ctrl);
1240cb2c9b27SAnton Blanchard 
1241fae2e0fbSBenjamin Herrenschmidt 	ti->local_flags |= _TLF_RUNLATCH;
1242cb2c9b27SAnton Blanchard }
1243cb2c9b27SAnton Blanchard 
1244fe1952fcSBenjamin Herrenschmidt /* Called with hard IRQs off */
12454138d653SAnton Blanchard void __ppc64_runlatch_off(void)
1246cb2c9b27SAnton Blanchard {
1247fe1952fcSBenjamin Herrenschmidt 	struct thread_info *ti = current_thread_info();
1248cb2c9b27SAnton Blanchard 	unsigned long ctrl;
1249cb2c9b27SAnton Blanchard 
1250fae2e0fbSBenjamin Herrenschmidt 	ti->local_flags &= ~_TLF_RUNLATCH;
1251cb2c9b27SAnton Blanchard 
1252cb2c9b27SAnton Blanchard 	ctrl = mfspr(SPRN_CTRLF);
1253cb2c9b27SAnton Blanchard 	ctrl &= ~CTRL_RUNLATCH;
1254cb2c9b27SAnton Blanchard 	mtspr(SPRN_CTRLT, ctrl);
1255cb2c9b27SAnton Blanchard }
1256fe1952fcSBenjamin Herrenschmidt #endif /* CONFIG_PPC64 */
1257f6a61680SBenjamin Herrenschmidt 
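/*
 * Randomise the initial user stack pointer: unless the task's
 * personality disables it, subtract a random sub-page offset and then
 * round down to the 16-byte alignment the ABI requires.  With 4K
 * pages, for instance, the stack top moves down by 0-4095 bytes before
 * the final alignment.
 */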
1258d839088cSAnton Blanchard unsigned long arch_align_stack(unsigned long sp)
1259d839088cSAnton Blanchard {
1260d839088cSAnton Blanchard 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1261d839088cSAnton Blanchard 		sp -= get_random_int() & ~PAGE_MASK;
1262d839088cSAnton Blanchard 	return sp & ~0xf;
1263d839088cSAnton Blanchard }
1264912f9ee2SAnton Blanchard 
1265912f9ee2SAnton Blanchard static inline unsigned long brk_rnd(void)
1266912f9ee2SAnton Blanchard {
1267912f9ee2SAnton Blanchard 	unsigned long rnd = 0;
1268912f9ee2SAnton Blanchard 
1269912f9ee2SAnton Blanchard 	/* 8MB for 32bit, 1GB for 64bit */
1270912f9ee2SAnton Blanchard 	if (is_32bit_task())
1271912f9ee2SAnton Blanchard 		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
1272912f9ee2SAnton Blanchard 	else
1273912f9ee2SAnton Blanchard 		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
1274912f9ee2SAnton Blanchard 
1275912f9ee2SAnton Blanchard 	return rnd << PAGE_SHIFT;
1276912f9ee2SAnton Blanchard }
1277912f9ee2SAnton Blanchard 
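/*
 * Pick a randomised starting brk for the new image: mm->brk (possibly
 * pushed above 1TB, see the comment below) plus brk_rnd(), page
 * aligned.  brk_rnd() yields a page-sized multiple below 8MB for a
 * 32-bit task and below 1GB for a 64-bit one; if the addition wraps we
 * fall back to the unrandomised brk.
 */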
1278912f9ee2SAnton Blanchard unsigned long arch_randomize_brk(struct mm_struct *mm)
1279912f9ee2SAnton Blanchard {
12808bbde7a7SAnton Blanchard 	unsigned long base = mm->brk;
12818bbde7a7SAnton Blanchard 	unsigned long ret;
12828bbde7a7SAnton Blanchard 
1283ce7a35c7SKumar Gala #ifdef CONFIG_PPC_STD_MMU_64
12848bbde7a7SAnton Blanchard 	/*
12858bbde7a7SAnton Blanchard 	 * If we are using 1TB segments and we are allowed to randomise
12868bbde7a7SAnton Blanchard 	 * the heap, we can put it above 1TB so it is backed by a 1TB
12878bbde7a7SAnton Blanchard 	 * segment. Otherwise the heap will be in the bottom 1TB
12888bbde7a7SAnton Blanchard 	 * which always uses 256MB segments and this may result in a
12898bbde7a7SAnton Blanchard 	 * performance penalty.
12908bbde7a7SAnton Blanchard 	 */
12918bbde7a7SAnton Blanchard 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
12928bbde7a7SAnton Blanchard 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
12938bbde7a7SAnton Blanchard #endif
12948bbde7a7SAnton Blanchard 
12958bbde7a7SAnton Blanchard 	ret = PAGE_ALIGN(base + brk_rnd());
1296912f9ee2SAnton Blanchard 
1297912f9ee2SAnton Blanchard 	if (ret < mm->brk)
1298912f9ee2SAnton Blanchard 		return mm->brk;
1299912f9ee2SAnton Blanchard 
1300912f9ee2SAnton Blanchard 	return ret;
1301912f9ee2SAnton Blanchard }
1302501cb16dSAnton Blanchard 
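/*
 * Used by the ELF loader to randomise the load base of ET_DYN (PIE)
 * executables: add the same page-aligned random offset as the heap
 * randomisation above, falling back to the unrandomised base if the
 * addition wraps.
 */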
1303501cb16dSAnton Blanchard unsigned long randomize_et_dyn(unsigned long base)
1304501cb16dSAnton Blanchard {
1305501cb16dSAnton Blanchard 	unsigned long ret = PAGE_ALIGN(base + brk_rnd());
1306501cb16dSAnton Blanchard 
1307501cb16dSAnton Blanchard 	if (ret < base)
1308501cb16dSAnton Blanchard 		return base;
1309501cb16dSAnton Blanchard 
1310501cb16dSAnton Blanchard 	return ret;
1311501cb16dSAnton Blanchard }
1312