xref: /linux/arch/powerpc/kernel/process.c (revision 18461960cbf50bf345ef0667d45d5f64de8fb893)
114cf11afSPaul Mackerras /*
214cf11afSPaul Mackerras  *  Derived from "arch/i386/kernel/process.c"
314cf11afSPaul Mackerras  *    Copyright (C) 1995  Linus Torvalds
414cf11afSPaul Mackerras  *
514cf11afSPaul Mackerras  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
614cf11afSPaul Mackerras  *  Paul Mackerras (paulus@cs.anu.edu.au)
714cf11afSPaul Mackerras  *
814cf11afSPaul Mackerras  *  PowerPC version
914cf11afSPaul Mackerras  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
1014cf11afSPaul Mackerras  *
1114cf11afSPaul Mackerras  *  This program is free software; you can redistribute it and/or
1214cf11afSPaul Mackerras  *  modify it under the terms of the GNU General Public License
1314cf11afSPaul Mackerras  *  as published by the Free Software Foundation; either version
1414cf11afSPaul Mackerras  *  2 of the License, or (at your option) any later version.
1514cf11afSPaul Mackerras  */
1614cf11afSPaul Mackerras 
1714cf11afSPaul Mackerras #include <linux/errno.h>
1814cf11afSPaul Mackerras #include <linux/sched.h>
1914cf11afSPaul Mackerras #include <linux/kernel.h>
2014cf11afSPaul Mackerras #include <linux/mm.h>
2114cf11afSPaul Mackerras #include <linux/smp.h>
2214cf11afSPaul Mackerras #include <linux/stddef.h>
2314cf11afSPaul Mackerras #include <linux/unistd.h>
2414cf11afSPaul Mackerras #include <linux/ptrace.h>
2514cf11afSPaul Mackerras #include <linux/slab.h>
2614cf11afSPaul Mackerras #include <linux/user.h>
2714cf11afSPaul Mackerras #include <linux/elf.h>
2814cf11afSPaul Mackerras #include <linux/init.h>
2914cf11afSPaul Mackerras #include <linux/prctl.h>
3014cf11afSPaul Mackerras #include <linux/init_task.h>
314b16f8e2SPaul Gortmaker #include <linux/export.h>
3214cf11afSPaul Mackerras #include <linux/kallsyms.h>
3314cf11afSPaul Mackerras #include <linux/mqueue.h>
3414cf11afSPaul Mackerras #include <linux/hardirq.h>
3506d67d54SPaul Mackerras #include <linux/utsname.h>
366794c782SSteven Rostedt #include <linux/ftrace.h>
3779741dd3SMartin Schwidefsky #include <linux/kernel_stat.h>
38d839088cSAnton Blanchard #include <linux/personality.h>
39d839088cSAnton Blanchard #include <linux/random.h>
405aae8a53SK.Prasad #include <linux/hw_breakpoint.h>
4114cf11afSPaul Mackerras 
4214cf11afSPaul Mackerras #include <asm/pgtable.h>
4314cf11afSPaul Mackerras #include <asm/uaccess.h>
4414cf11afSPaul Mackerras #include <asm/io.h>
4514cf11afSPaul Mackerras #include <asm/processor.h>
4614cf11afSPaul Mackerras #include <asm/mmu.h>
4714cf11afSPaul Mackerras #include <asm/prom.h>
4876032de8SMichael Ellerman #include <asm/machdep.h>
49c6622f63SPaul Mackerras #include <asm/time.h>
50ae3a197eSDavid Howells #include <asm/runlatch.h>
51a7f31841SArnd Bergmann #include <asm/syscalls.h>
52ae3a197eSDavid Howells #include <asm/switch_to.h>
53fb09692eSMichael Neuling #include <asm/tm.h>
54ae3a197eSDavid Howells #include <asm/debug.h>
5506d67d54SPaul Mackerras #ifdef CONFIG_PPC64
5606d67d54SPaul Mackerras #include <asm/firmware.h>
5706d67d54SPaul Mackerras #endif
58d6a61bfcSLuis Machado #include <linux/kprobes.h>
59d6a61bfcSLuis Machado #include <linux/kdebug.h>
6014cf11afSPaul Mackerras 
618b3c34cfSMichael Neuling /* Transactional Memory debug */
628b3c34cfSMichael Neuling #ifdef TM_DEBUG_SW
638b3c34cfSMichael Neuling #define TM_DEBUG(x...) printk(KERN_INFO x)
648b3c34cfSMichael Neuling #else
658b3c34cfSMichael Neuling #define TM_DEBUG(x...) do { } while(0)
668b3c34cfSMichael Neuling #endif
678b3c34cfSMichael Neuling 
6814cf11afSPaul Mackerras extern unsigned long _get_SP(void);
6914cf11afSPaul Mackerras 
7014cf11afSPaul Mackerras #ifndef CONFIG_SMP
7114cf11afSPaul Mackerras struct task_struct *last_task_used_math = NULL;
7214cf11afSPaul Mackerras struct task_struct *last_task_used_altivec = NULL;
73ce48b210SMichael Neuling struct task_struct *last_task_used_vsx = NULL;
7414cf11afSPaul Mackerras struct task_struct *last_task_used_spe = NULL;
7514cf11afSPaul Mackerras #endif
7614cf11afSPaul Mackerras 
#ifdef CONFIG_PPC_FPU
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 * (Kernel threads have tsk->thread.regs == NULL and are skipped.)
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif
11214cf11afSPaul Mackerras 
11314cf11afSPaul Mackerras void enable_kernel_fp(void)
11414cf11afSPaul Mackerras {
11514cf11afSPaul Mackerras 	WARN_ON(preemptible());
11614cf11afSPaul Mackerras 
11714cf11afSPaul Mackerras #ifdef CONFIG_SMP
11814cf11afSPaul Mackerras 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
11914cf11afSPaul Mackerras 		giveup_fpu(current);
12014cf11afSPaul Mackerras 	else
12114cf11afSPaul Mackerras 		giveup_fpu(NULL);	/* just enables FP for kernel */
12214cf11afSPaul Mackerras #else
12314cf11afSPaul Mackerras 	giveup_fpu(last_task_used_math);
12414cf11afSPaul Mackerras #endif /* CONFIG_SMP */
12514cf11afSPaul Mackerras }
12614cf11afSPaul Mackerras EXPORT_SYMBOL(enable_kernel_fp);
12714cf11afSPaul Mackerras 
12814cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC
12914cf11afSPaul Mackerras void enable_kernel_altivec(void)
13014cf11afSPaul Mackerras {
13114cf11afSPaul Mackerras 	WARN_ON(preemptible());
13214cf11afSPaul Mackerras 
13314cf11afSPaul Mackerras #ifdef CONFIG_SMP
13414cf11afSPaul Mackerras 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
13514cf11afSPaul Mackerras 		giveup_altivec(current);
13614cf11afSPaul Mackerras 	else
13735000870SAnton Blanchard 		giveup_altivec_notask();
13814cf11afSPaul Mackerras #else
13914cf11afSPaul Mackerras 	giveup_altivec(last_task_used_altivec);
14014cf11afSPaul Mackerras #endif /* CONFIG_SMP */
14114cf11afSPaul Mackerras }
14214cf11afSPaul Mackerras EXPORT_SYMBOL(enable_kernel_altivec);
14314cf11afSPaul Mackerras 
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
14814cf11afSPaul Mackerras void flush_altivec_to_thread(struct task_struct *tsk)
14914cf11afSPaul Mackerras {
15014cf11afSPaul Mackerras 	if (tsk->thread.regs) {
15114cf11afSPaul Mackerras 		preempt_disable();
15214cf11afSPaul Mackerras 		if (tsk->thread.regs->msr & MSR_VEC) {
15314cf11afSPaul Mackerras #ifdef CONFIG_SMP
15414cf11afSPaul Mackerras 			BUG_ON(tsk != current);
15514cf11afSPaul Mackerras #endif
1560ee6c15eSKumar Gala 			giveup_altivec(tsk);
15714cf11afSPaul Mackerras 		}
15814cf11afSPaul Mackerras 		preempt_enable();
15914cf11afSPaul Mackerras 	}
16014cf11afSPaul Mackerras }
161de56a948SPaul Mackerras EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
16214cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */
16314cf11afSPaul Mackerras 
164ce48b210SMichael Neuling #ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
/* NOTE: this whole function is compiled out (#if 0); kept as reference. */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif
182ce48b210SMichael Neuling 
/*
 * Give up all of tsk's FP, VMX and VSX state: FP and VMX are released
 * first, then __giveup_vsx() drops the VSX-specific remainder.  The
 * call order appears deliberate — preserve it.
 */
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}
1897c292170SMichael Neuling 
/*
 * Make sure the VSX register state in the thread_struct is up to date
 * for task tsk (mirrors flush_fp_to_thread/flush_altivec_to_thread).
 */
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* No preemption between the MSR test and the save. */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			/* On SMP only current can own live VSX state. */
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
204ce48b210SMichael Neuling #endif /* CONFIG_VSX */
205ce48b210SMichael Neuling 
20614cf11afSPaul Mackerras #ifdef CONFIG_SPE
20714cf11afSPaul Mackerras 
/*
 * Allow the kernel to use the SPE unit: any live user SPE state is
 * saved first.  Callers must not be preemptible.
 */
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
22214cf11afSPaul Mackerras 
/*
 * Make sure the SPE register state in the thread_struct is up to date
 * for task tsk.  Also snapshots the SPE status/control register
 * (SPEFSCR) before the unit is given up.
 */
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
23714cf11afSPaul Mackerras #endif /* CONFIG_SPE */
23814cf11afSPaul Mackerras 
#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 * Clearing the last_task_used_* pointers means a later facility
 * fault reloads fresh state instead of saving ours first.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */
26448abec07SPaul Mackerras 
2653bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Report a debug-register match on an ADV_DEBUG_REGS core: record the
 * trap, give in-kernel debuggers first refusal via notify_die(), then
 * deliver SIGTRAP to userspace.  @breakpt identifies which breakpoint
 * or watchpoint fired and is passed to userspace in si_errno.
 */
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
2833bffb652SDave Kleikamp #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
/*
 * Handle a hardware data-breakpoint match (non-ADV_DEBUG_REGS cores):
 * let notify_die() consumers or an attached debugger claim the event;
 * otherwise disable the breakpoint and post SIGTRAP/TRAP_HWBKPT to the
 * current task.
 */
void do_break (struct pt_regs *regs, unsigned long address,
		    unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
3073bffb652SDave Kleikamp #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
308d6a61bfcSLuis Machado 
/* Cached copy of the hw breakpoint currently installed on this CPU
 * (written by set_breakpoint()). */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
310a2ceff5eSMichael Ellerman 
3113bffb652SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 * Only @thread's saved register image is reset here; the hardware
 * itself is not written by this function.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |	\
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}
3413bffb652SDave Kleikamp 
/*
 * Load @thread's saved debug-register image into the hardware
 * IAC/DAC/DVC/DBCR SPRs.
 */
static void prime_debug_regs(struct thread_struct *thread)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	/* Control registers last, after all compare values are in place. */
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
3693bffb652SDave Kleikamp /*
3703bffb652SDave Kleikamp  * Unless neither the old or new thread are making use of the
3713bffb652SDave Kleikamp  * debug registers, set the debug registers from the values
3723bffb652SDave Kleikamp  * stored in the new thread.
3733bffb652SDave Kleikamp  */
3743bffb652SDave Kleikamp static void switch_booke_debug_regs(struct thread_struct *new_thread)
3753bffb652SDave Kleikamp {
3763bffb652SDave Kleikamp 	if ((current->thread.dbcr0 & DBCR0_IDM)
3773bffb652SDave Kleikamp 		|| (new_thread->dbcr0 & DBCR0_IDM))
3783bffb652SDave Kleikamp 			prime_debug_regs(new_thread);
3793bffb652SDave Kleikamp }
3803bffb652SDave Kleikamp #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
381e0780b72SK.Prasad #ifndef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Reset @thread's hardware breakpoint to a benign state (zero address
 * and type) and push that empty setting into the hardware.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
388e0780b72SK.Prasad #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
3893bffb652SDave Kleikamp #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
3903bffb652SDave Kleikamp 
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* BookE-style cores: the DABR equivalent is the DAC1 register. */
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	/* presumably needed for the DAC write to take effect on 47x —
	 * TODO confirm against the 476 manual */
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
/* Book3S: classic DABR, plus DABRX where the CPU supports it. */
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
/* No data address breakpoint facility on this core. */
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif
4149422de3eSMichael Neuling 
4159422de3eSMichael Neuling static inline int set_dabr(struct arch_hw_breakpoint *brk)
4169422de3eSMichael Neuling {
4179422de3eSMichael Neuling 	unsigned long dabr, dabrx;
4189422de3eSMichael Neuling 
4199422de3eSMichael Neuling 	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
4209422de3eSMichael Neuling 	dabrx = ((brk->type >> 3) & 0x7);
4219422de3eSMichael Neuling 
4229422de3eSMichael Neuling 	if (ppc_md.set_dabr)
4239422de3eSMichael Neuling 		return ppc_md.set_dabr(dabr, dabrx);
4249422de3eSMichael Neuling 
4259422de3eSMichael Neuling 	return __set_dabr(dabr, dabrx);
4269422de3eSMichael Neuling }
4279422de3eSMichael Neuling 
/*
 * Translate a generic arch_hw_breakpoint into DAWR/DAWRX values and
 * program them, preferring a platform (ppc_md) hook when one exists.
 */
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
		                   << (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
		                   << (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
		                   >> 3; /* PRIM bits */
	/* dawr length is stored in field MDR bits 48:53.  Matches range in
	   doublewords (64 bits) biased by -1 eg. 0b000000=1DW and
	   0b111111=64DW.
	   brk->len is in bytes.
	   This aligns up to double word size, shifts and does the bias.
	*/
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
455bf99de36SMichael Neuling 
/*
 * Install @brk as this CPU's hardware data breakpoint: cache it in the
 * per-cpu current_brk, then program DAWR when the CPU has it, falling
 * back to DABR otherwise.
 */
int set_breakpoint(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;

	if (cpu_has_feature(CPU_FTR_DAWR))
		return set_dawr(brk);

	return set_dabr(brk);
}
46514cf11afSPaul Mackerras 
#ifdef CONFIG_PPC64
/* Per-cpu usage accounting; consumers are elsewhere (see struct cpu_usage). */
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
46914cf11afSPaul Mackerras 
4709422de3eSMichael Neuling static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
4719422de3eSMichael Neuling 			      struct arch_hw_breakpoint *b)
4729422de3eSMichael Neuling {
4739422de3eSMichael Neuling 	if (a->address != b->address)
4749422de3eSMichael Neuling 		return false;
4759422de3eSMichael Neuling 	if (a->type != b->type)
4769422de3eSMichael Neuling 		return false;
4779422de3eSMichael Neuling 	if (a->len != b->len)
4789422de3eSMichael Neuling 		return false;
4799422de3eSMichael Neuling 	return true;
4809422de3eSMichael Neuling }
481fb09692eSMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Called from __switch_to_tm() for the outgoing task: reclaim any
 * active transaction and always save the TM SPRs.
 */
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	/* Kernel threads have no user register state. */
	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.
	 */
	thr->tm_orig_msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}
527fb09692eSMichael Neuling 
/*
 * Restore the incoming task's TM state on context switch: restore the
 * TM SPRs and, if a transaction was active, trecheckpoint and reload
 * the checkpointed and speculative FP/VEC/VSX state.
 */
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavoidable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!new->thread.regs)
		return;

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(&new->thread);

	if (!MSR_TM_ACTIVE(new->thread.regs->msr))
		return;
	msr = new->thread.tm_orig_msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}
582fb09692eSMichael Neuling 
583fb09692eSMichael Neuling static inline void __switch_to_tm(struct task_struct *prev)
584fb09692eSMichael Neuling {
585fb09692eSMichael Neuling 	if (cpu_has_feature(CPU_FTR_TM)) {
586fb09692eSMichael Neuling 		tm_enable();
587fb09692eSMichael Neuling 		tm_reclaim_task(prev);
588fb09692eSMichael Neuling 	}
589fb09692eSMichael Neuling }
590fb09692eSMichael Neuling #else
591fb09692eSMichael Neuling #define tm_recheckpoint_new_task(new)
592fb09692eSMichael Neuling #define __switch_to_tm(prev)
593fb09692eSMichael Neuling #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
5949422de3eSMichael Neuling 
59514cf11afSPaul Mackerras struct task_struct *__switch_to(struct task_struct *prev,
59614cf11afSPaul Mackerras 	struct task_struct *new)
59714cf11afSPaul Mackerras {
59814cf11afSPaul Mackerras 	struct thread_struct *new_thread, *old_thread;
59914cf11afSPaul Mackerras 	unsigned long flags;
60014cf11afSPaul Mackerras 	struct task_struct *last;
601d6bf29b4SPeter Zijlstra #ifdef CONFIG_PPC_BOOK3S_64
602d6bf29b4SPeter Zijlstra 	struct ppc64_tlb_batch *batch;
603d6bf29b4SPeter Zijlstra #endif
60414cf11afSPaul Mackerras 
605c2d52644SMichael Neuling 	/* Back up the TAR across context switches.
606c2d52644SMichael Neuling 	 * Note that the TAR is not available for use in the kernel.  (To
607c2d52644SMichael Neuling 	 * provide this, the TAR should be backed up/restored on exception
608c2d52644SMichael Neuling 	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
609c2d52644SMichael Neuling 	 * pt_regs anyway (for debug).)
610c2d52644SMichael Neuling 	 * Save the TAR here before we do treclaim/trecheckpoint as these
611c2d52644SMichael Neuling 	 * will change the TAR.
612c2d52644SMichael Neuling 	 */
613c2d52644SMichael Neuling 	save_tar(&prev->thread);
614c2d52644SMichael Neuling 
615bc2a9408SMichael Neuling 	__switch_to_tm(prev);
616bc2a9408SMichael Neuling 
61714cf11afSPaul Mackerras #ifdef CONFIG_SMP
61814cf11afSPaul Mackerras 	/* avoid complexity of lazy save/restore of fpu
61914cf11afSPaul Mackerras 	 * by just saving it every time we switch out if
62014cf11afSPaul Mackerras 	 * this task used the fpu during the last quantum.
62114cf11afSPaul Mackerras 	 *
62214cf11afSPaul Mackerras 	 * If it tries to use the fpu again, it'll trap and
62314cf11afSPaul Mackerras 	 * reload its fp regs.  So we don't have to do a restore
62414cf11afSPaul Mackerras 	 * every switch, just a save.
62514cf11afSPaul Mackerras 	 *  -- Cort
62614cf11afSPaul Mackerras 	 */
62714cf11afSPaul Mackerras 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
62814cf11afSPaul Mackerras 		giveup_fpu(prev);
62914cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC
63014cf11afSPaul Mackerras 	/*
63114cf11afSPaul Mackerras 	 * If the previous thread used altivec in the last quantum
63214cf11afSPaul Mackerras 	 * (thus changing altivec regs) then save them.
63314cf11afSPaul Mackerras 	 * We used to check the VRSAVE register but not all apps
63414cf11afSPaul Mackerras 	 * set it, so we don't rely on it now (and in fact we need
63514cf11afSPaul Mackerras 	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
63614cf11afSPaul Mackerras 	 *
63714cf11afSPaul Mackerras 	 * On SMP we always save/restore altivec regs just to avoid the
63814cf11afSPaul Mackerras 	 * complexity of changing processors.
63914cf11afSPaul Mackerras 	 *  -- Cort
64014cf11afSPaul Mackerras 	 */
64114cf11afSPaul Mackerras 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
64214cf11afSPaul Mackerras 		giveup_altivec(prev);
64314cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */
644ce48b210SMichael Neuling #ifdef CONFIG_VSX
645ce48b210SMichael Neuling 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
6467c292170SMichael Neuling 		/* VMX and FPU registers are already save here */
6477c292170SMichael Neuling 		__giveup_vsx(prev);
648ce48b210SMichael Neuling #endif /* CONFIG_VSX */
64914cf11afSPaul Mackerras #ifdef CONFIG_SPE
65014cf11afSPaul Mackerras 	/*
65114cf11afSPaul Mackerras 	 * If the previous thread used spe in the last quantum
65214cf11afSPaul Mackerras 	 * (thus changing spe regs) then save them.
65314cf11afSPaul Mackerras 	 *
65414cf11afSPaul Mackerras 	 * On SMP we always save/restore spe regs just to avoid the
65514cf11afSPaul Mackerras 	 * complexity of changing processors.
65614cf11afSPaul Mackerras 	 */
65714cf11afSPaul Mackerras 	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
65814cf11afSPaul Mackerras 		giveup_spe(prev);
659c0c0d996SPaul Mackerras #endif /* CONFIG_SPE */
660c0c0d996SPaul Mackerras 
661c0c0d996SPaul Mackerras #else  /* CONFIG_SMP */
662c0c0d996SPaul Mackerras #ifdef CONFIG_ALTIVEC
663c0c0d996SPaul Mackerras 	/* Avoid the trap.  On smp this this never happens since
664c0c0d996SPaul Mackerras 	 * we don't set last_task_used_altivec -- Cort
665c0c0d996SPaul Mackerras 	 */
666c0c0d996SPaul Mackerras 	if (new->thread.regs && last_task_used_altivec == new)
667c0c0d996SPaul Mackerras 		new->thread.regs->msr |= MSR_VEC;
668c0c0d996SPaul Mackerras #endif /* CONFIG_ALTIVEC */
669ce48b210SMichael Neuling #ifdef CONFIG_VSX
670ce48b210SMichael Neuling 	if (new->thread.regs && last_task_used_vsx == new)
671ce48b210SMichael Neuling 		new->thread.regs->msr |= MSR_VSX;
672ce48b210SMichael Neuling #endif /* CONFIG_VSX */
673c0c0d996SPaul Mackerras #ifdef CONFIG_SPE
67414cf11afSPaul Mackerras 	/* Avoid the trap.  On smp this this never happens since
67514cf11afSPaul Mackerras 	 * we don't set last_task_used_spe
67614cf11afSPaul Mackerras 	 */
67714cf11afSPaul Mackerras 	if (new->thread.regs && last_task_used_spe == new)
67814cf11afSPaul Mackerras 		new->thread.regs->msr |= MSR_SPE;
67914cf11afSPaul Mackerras #endif /* CONFIG_SPE */
680c0c0d996SPaul Mackerras 
68114cf11afSPaul Mackerras #endif /* CONFIG_SMP */
68214cf11afSPaul Mackerras 
683172ae2e7SDave Kleikamp #ifdef CONFIG_PPC_ADV_DEBUG_REGS
6843bffb652SDave Kleikamp 	switch_booke_debug_regs(&new->thread);
685c6c9eaceSBenjamin Herrenschmidt #else
6865aae8a53SK.Prasad /*
6875aae8a53SK.Prasad  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
6885aae8a53SK.Prasad  * schedule DABR
6895aae8a53SK.Prasad  */
6905aae8a53SK.Prasad #ifndef CONFIG_HAVE_HW_BREAKPOINT
6919422de3eSMichael Neuling 	if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
692b9818c33SMichael Neuling 		set_breakpoint(&new->thread.hw_brk);
6935aae8a53SK.Prasad #endif /* CONFIG_HAVE_HW_BREAKPOINT */
694d6a61bfcSLuis Machado #endif
695d6a61bfcSLuis Machado 
696c6c9eaceSBenjamin Herrenschmidt 
69714cf11afSPaul Mackerras 	new_thread = &new->thread;
69814cf11afSPaul Mackerras 	old_thread = &current->thread;
69906d67d54SPaul Mackerras 
70006d67d54SPaul Mackerras #ifdef CONFIG_PPC64
70106d67d54SPaul Mackerras 	/*
70206d67d54SPaul Mackerras 	 * Collect processor utilization data per process
70306d67d54SPaul Mackerras 	 */
70406d67d54SPaul Mackerras 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
70506d67d54SPaul Mackerras 		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
70606d67d54SPaul Mackerras 		long unsigned start_tb, current_tb;
70706d67d54SPaul Mackerras 		start_tb = old_thread->start_tb;
70806d67d54SPaul Mackerras 		cu->current_tb = current_tb = mfspr(SPRN_PURR);
70906d67d54SPaul Mackerras 		old_thread->accum_tb += (current_tb - start_tb);
71006d67d54SPaul Mackerras 		new_thread->start_tb = current_tb;
71106d67d54SPaul Mackerras 	}
712d6bf29b4SPeter Zijlstra #endif /* CONFIG_PPC64 */
713d6bf29b4SPeter Zijlstra 
714d6bf29b4SPeter Zijlstra #ifdef CONFIG_PPC_BOOK3S_64
715d6bf29b4SPeter Zijlstra 	batch = &__get_cpu_var(ppc64_tlb_batch);
716d6bf29b4SPeter Zijlstra 	if (batch->active) {
717d6bf29b4SPeter Zijlstra 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
718d6bf29b4SPeter Zijlstra 		if (batch->index)
719d6bf29b4SPeter Zijlstra 			__flush_tlb_pending(batch);
720d6bf29b4SPeter Zijlstra 		batch->active = 0;
721d6bf29b4SPeter Zijlstra 	}
722d6bf29b4SPeter Zijlstra #endif /* CONFIG_PPC_BOOK3S_64 */
72306d67d54SPaul Mackerras 
72414cf11afSPaul Mackerras 	local_irq_save(flags);
725c6622f63SPaul Mackerras 
72644387e9fSAnton Blanchard 	/*
72744387e9fSAnton Blanchard 	 * We can't take a PMU exception inside _switch() since there is a
72844387e9fSAnton Blanchard 	 * window where the kernel stack SLB and the kernel stack are out
72944387e9fSAnton Blanchard 	 * of sync. Hard disable here.
73044387e9fSAnton Blanchard 	 */
73144387e9fSAnton Blanchard 	hard_irq_disable();
732bc2a9408SMichael Neuling 
733bc2a9408SMichael Neuling 	tm_recheckpoint_new_task(new);
734bc2a9408SMichael Neuling 
73514cf11afSPaul Mackerras 	last = _switch(old_thread, new_thread);
73614cf11afSPaul Mackerras 
737d6bf29b4SPeter Zijlstra #ifdef CONFIG_PPC_BOOK3S_64
738d6bf29b4SPeter Zijlstra 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
739d6bf29b4SPeter Zijlstra 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
740d6bf29b4SPeter Zijlstra 		batch = &__get_cpu_var(ppc64_tlb_batch);
741d6bf29b4SPeter Zijlstra 		batch->active = 1;
742d6bf29b4SPeter Zijlstra 	}
743d6bf29b4SPeter Zijlstra #endif /* CONFIG_PPC_BOOK3S_64 */
744d6bf29b4SPeter Zijlstra 
74514cf11afSPaul Mackerras 	local_irq_restore(flags);
74614cf11afSPaul Mackerras 
74714cf11afSPaul Mackerras 	return last;
74814cf11afSPaul Mackerras }
74914cf11afSPaul Mackerras 
75006d67d54SPaul Mackerras static int instructions_to_print = 16;
75106d67d54SPaul Mackerras 
75206d67d54SPaul Mackerras static void show_instructions(struct pt_regs *regs)
75306d67d54SPaul Mackerras {
75406d67d54SPaul Mackerras 	int i;
75506d67d54SPaul Mackerras 	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
75606d67d54SPaul Mackerras 			sizeof(int));
75706d67d54SPaul Mackerras 
75806d67d54SPaul Mackerras 	printk("Instruction dump:");
75906d67d54SPaul Mackerras 
76006d67d54SPaul Mackerras 	for (i = 0; i < instructions_to_print; i++) {
76106d67d54SPaul Mackerras 		int instr;
76206d67d54SPaul Mackerras 
76306d67d54SPaul Mackerras 		if (!(i % 8))
76406d67d54SPaul Mackerras 			printk("\n");
76506d67d54SPaul Mackerras 
7660de2d820SScott Wood #if !defined(CONFIG_BOOKE)
7670de2d820SScott Wood 		/* If executing with the IMMU off, adjust pc rather
7680de2d820SScott Wood 		 * than print XXXXXXXX.
7690de2d820SScott Wood 		 */
7700de2d820SScott Wood 		if (!(regs->msr & MSR_IR))
7710de2d820SScott Wood 			pc = (unsigned long)phys_to_virt(pc);
7720de2d820SScott Wood #endif
7730de2d820SScott Wood 
774af308377SStephen Rothwell 		/* We use __get_user here *only* to avoid an OOPS on a
775af308377SStephen Rothwell 		 * bad address because the pc *should* only be a
776af308377SStephen Rothwell 		 * kernel address.
777af308377SStephen Rothwell 		 */
77800ae36deSAnton Blanchard 		if (!__kernel_text_address(pc) ||
77900ae36deSAnton Blanchard 		     __get_user(instr, (unsigned int __user *)pc)) {
78040c8cefaSIra Snyder 			printk(KERN_CONT "XXXXXXXX ");
78106d67d54SPaul Mackerras 		} else {
78206d67d54SPaul Mackerras 			if (regs->nip == pc)
78340c8cefaSIra Snyder 				printk(KERN_CONT "<%08x> ", instr);
78406d67d54SPaul Mackerras 			else
78540c8cefaSIra Snyder 				printk(KERN_CONT "%08x ", instr);
78606d67d54SPaul Mackerras 		}
78706d67d54SPaul Mackerras 
78806d67d54SPaul Mackerras 		pc += sizeof(int);
78906d67d54SPaul Mackerras 	}
79006d67d54SPaul Mackerras 
79106d67d54SPaul Mackerras 	printk("\n");
79206d67d54SPaul Mackerras }
79306d67d54SPaul Mackerras 
79406d67d54SPaul Mackerras static struct regbit {
79506d67d54SPaul Mackerras 	unsigned long bit;
79606d67d54SPaul Mackerras 	const char *name;
79706d67d54SPaul Mackerras } msr_bits[] = {
7983bfd0c9cSAnton Blanchard #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
7993bfd0c9cSAnton Blanchard 	{MSR_SF,	"SF"},
8003bfd0c9cSAnton Blanchard 	{MSR_HV,	"HV"},
8013bfd0c9cSAnton Blanchard #endif
8023bfd0c9cSAnton Blanchard 	{MSR_VEC,	"VEC"},
8033bfd0c9cSAnton Blanchard 	{MSR_VSX,	"VSX"},
8043bfd0c9cSAnton Blanchard #ifdef CONFIG_BOOKE
8053bfd0c9cSAnton Blanchard 	{MSR_CE,	"CE"},
8063bfd0c9cSAnton Blanchard #endif
80706d67d54SPaul Mackerras 	{MSR_EE,	"EE"},
80806d67d54SPaul Mackerras 	{MSR_PR,	"PR"},
80906d67d54SPaul Mackerras 	{MSR_FP,	"FP"},
81006d67d54SPaul Mackerras 	{MSR_ME,	"ME"},
8113bfd0c9cSAnton Blanchard #ifdef CONFIG_BOOKE
8121b98326bSKumar Gala 	{MSR_DE,	"DE"},
8133bfd0c9cSAnton Blanchard #else
8143bfd0c9cSAnton Blanchard 	{MSR_SE,	"SE"},
8153bfd0c9cSAnton Blanchard 	{MSR_BE,	"BE"},
8163bfd0c9cSAnton Blanchard #endif
81706d67d54SPaul Mackerras 	{MSR_IR,	"IR"},
81806d67d54SPaul Mackerras 	{MSR_DR,	"DR"},
8193bfd0c9cSAnton Blanchard 	{MSR_PMM,	"PMM"},
8203bfd0c9cSAnton Blanchard #ifndef CONFIG_BOOKE
8213bfd0c9cSAnton Blanchard 	{MSR_RI,	"RI"},
8223bfd0c9cSAnton Blanchard 	{MSR_LE,	"LE"},
8233bfd0c9cSAnton Blanchard #endif
82406d67d54SPaul Mackerras 	{0,		NULL}
82506d67d54SPaul Mackerras };
82606d67d54SPaul Mackerras 
82706d67d54SPaul Mackerras static void printbits(unsigned long val, struct regbit *bits)
82806d67d54SPaul Mackerras {
82906d67d54SPaul Mackerras 	const char *sep = "";
83006d67d54SPaul Mackerras 
83106d67d54SPaul Mackerras 	printk("<");
83206d67d54SPaul Mackerras 	for (; bits->bit; ++bits)
83306d67d54SPaul Mackerras 		if (val & bits->bit) {
83406d67d54SPaul Mackerras 			printk("%s%s", sep, bits->name);
83506d67d54SPaul Mackerras 			sep = ",";
83606d67d54SPaul Mackerras 		}
83706d67d54SPaul Mackerras 	printk(">");
83806d67d54SPaul Mackerras }
83906d67d54SPaul Mackerras 
84006d67d54SPaul Mackerras #ifdef CONFIG_PPC64
841f6f7dde3Santon@samba.org #define REG		"%016lx"
84206d67d54SPaul Mackerras #define REGS_PER_LINE	4
84306d67d54SPaul Mackerras #define LAST_VOLATILE	13
84406d67d54SPaul Mackerras #else
845f6f7dde3Santon@samba.org #define REG		"%08lx"
84606d67d54SPaul Mackerras #define REGS_PER_LINE	8
84706d67d54SPaul Mackerras #define LAST_VOLATILE	12
84806d67d54SPaul Mackerras #endif
84906d67d54SPaul Mackerras 
85014cf11afSPaul Mackerras void show_regs(struct pt_regs * regs)
85114cf11afSPaul Mackerras {
85214cf11afSPaul Mackerras 	int i, trap;
85314cf11afSPaul Mackerras 
854a43cb95dSTejun Heo 	show_regs_print_info(KERN_DEFAULT);
855a43cb95dSTejun Heo 
85606d67d54SPaul Mackerras 	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
85706d67d54SPaul Mackerras 	       regs->nip, regs->link, regs->ctr);
85806d67d54SPaul Mackerras 	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
85996b644bdSSerge E. Hallyn 	       regs, regs->trap, print_tainted(), init_utsname()->release);
86006d67d54SPaul Mackerras 	printk("MSR: "REG" ", regs->msr);
86106d67d54SPaul Mackerras 	printbits(regs->msr, msr_bits);
862f6f7dde3Santon@samba.org 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
8637230c564SBenjamin Herrenschmidt #ifdef CONFIG_PPC64
8647230c564SBenjamin Herrenschmidt 	printk("SOFTE: %ld\n", regs->softe);
8657230c564SBenjamin Herrenschmidt #endif
86614cf11afSPaul Mackerras 	trap = TRAP(regs);
8675115a026SMichael Neuling 	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
8685115a026SMichael Neuling 		printk("CFAR: "REG"\n", regs->orig_gpr3);
86914cf11afSPaul Mackerras 	if (trap == 0x300 || trap == 0x600)
870ba28c9aaSKumar Gala #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
87114170789SKumar Gala 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
87214170789SKumar Gala #else
8737071854bSAnton Blanchard 		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
87414170789SKumar Gala #endif
87514cf11afSPaul Mackerras 
87614cf11afSPaul Mackerras 	for (i = 0;  i < 32;  i++) {
87706d67d54SPaul Mackerras 		if ((i % REGS_PER_LINE) == 0)
878a2367194SKumar Gala 			printk("\nGPR%02d: ", i);
87906d67d54SPaul Mackerras 		printk(REG " ", regs->gpr[i]);
88006d67d54SPaul Mackerras 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
88114cf11afSPaul Mackerras 			break;
88214cf11afSPaul Mackerras 	}
88314cf11afSPaul Mackerras 	printk("\n");
88414cf11afSPaul Mackerras #ifdef CONFIG_KALLSYMS
88514cf11afSPaul Mackerras 	/*
88614cf11afSPaul Mackerras 	 * Lookup NIP late so we have the best change of getting the
88714cf11afSPaul Mackerras 	 * above info out without failing
88814cf11afSPaul Mackerras 	 */
889058c78f4SBenjamin Herrenschmidt 	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
890058c78f4SBenjamin Herrenschmidt 	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
89114cf11afSPaul Mackerras #endif
892afc07701SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
893afc07701SMichael Neuling 	printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
894afc07701SMichael Neuling #endif
89514cf11afSPaul Mackerras 	show_stack(current, (unsigned long *) regs->gpr[1]);
89606d67d54SPaul Mackerras 	if (!user_mode(regs))
89706d67d54SPaul Mackerras 		show_instructions(regs);
89814cf11afSPaul Mackerras }
89914cf11afSPaul Mackerras 
90014cf11afSPaul Mackerras void exit_thread(void)
90114cf11afSPaul Mackerras {
90248abec07SPaul Mackerras 	discard_lazy_cpu_state();
90314cf11afSPaul Mackerras }
90414cf11afSPaul Mackerras 
90514cf11afSPaul Mackerras void flush_thread(void)
90614cf11afSPaul Mackerras {
90748abec07SPaul Mackerras 	discard_lazy_cpu_state();
90814cf11afSPaul Mackerras 
909e0780b72SK.Prasad #ifdef CONFIG_HAVE_HW_BREAKPOINT
9105aae8a53SK.Prasad 	flush_ptrace_hw_breakpoint(current);
911e0780b72SK.Prasad #else /* CONFIG_HAVE_HW_BREAKPOINT */
9123bffb652SDave Kleikamp 	set_debug_reg_defaults(&current->thread);
913e0780b72SK.Prasad #endif /* CONFIG_HAVE_HW_BREAKPOINT */
91414cf11afSPaul Mackerras }
91514cf11afSPaul Mackerras 
91614cf11afSPaul Mackerras void
91714cf11afSPaul Mackerras release_thread(struct task_struct *t)
91814cf11afSPaul Mackerras {
91914cf11afSPaul Mackerras }
92014cf11afSPaul Mackerras 
92114cf11afSPaul Mackerras /*
92255ccf3feSSuresh Siddha  * this gets called so that we can store coprocessor state into memory and
92355ccf3feSSuresh Siddha  * copy the current task into the new thread.
92414cf11afSPaul Mackerras  */
92555ccf3feSSuresh Siddha int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
92614cf11afSPaul Mackerras {
92755ccf3feSSuresh Siddha 	flush_fp_to_thread(src);
92855ccf3feSSuresh Siddha 	flush_altivec_to_thread(src);
92955ccf3feSSuresh Siddha 	flush_vsx_to_thread(src);
93055ccf3feSSuresh Siddha 	flush_spe_to_thread(src);
931330a1eb7SMichael Ellerman 
93255ccf3feSSuresh Siddha 	*dst = *src;
933330a1eb7SMichael Ellerman 
934330a1eb7SMichael Ellerman 	clear_task_ebb(dst);
935330a1eb7SMichael Ellerman 
93655ccf3feSSuresh Siddha 	return 0;
93714cf11afSPaul Mackerras }
93814cf11afSPaul Mackerras 
93914cf11afSPaul Mackerras /*
94014cf11afSPaul Mackerras  * Copy a thread..
94114cf11afSPaul Mackerras  */
942efcac658SAlexey Kardashevskiy extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
943efcac658SAlexey Kardashevskiy 
9446f2c55b8SAlexey Dobriyan int copy_thread(unsigned long clone_flags, unsigned long usp,
945afa86fc4SAl Viro 		unsigned long arg, struct task_struct *p)
94614cf11afSPaul Mackerras {
94714cf11afSPaul Mackerras 	struct pt_regs *childregs, *kregs;
94814cf11afSPaul Mackerras 	extern void ret_from_fork(void);
94958254e10SAl Viro 	extern void ret_from_kernel_thread(void);
95058254e10SAl Viro 	void (*f)(void);
9510cec6fd1SAl Viro 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
95214cf11afSPaul Mackerras 
95314cf11afSPaul Mackerras 	/* Copy registers */
95414cf11afSPaul Mackerras 	sp -= sizeof(struct pt_regs);
95514cf11afSPaul Mackerras 	childregs = (struct pt_regs *) sp;
956ab75819dSAl Viro 	if (unlikely(p->flags & PF_KTHREAD)) {
957138d1ce8SAl Viro 		struct thread_info *ti = (void *)task_stack_page(p);
95858254e10SAl Viro 		memset(childregs, 0, sizeof(struct pt_regs));
95914cf11afSPaul Mackerras 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
96053b50f94SAl Viro 		childregs->gpr[14] = usp;	/* function */
96158254e10SAl Viro #ifdef CONFIG_PPC64
962b5e2fc1cSAl Viro 		clear_tsk_thread_flag(p, TIF_32BIT);
963138d1ce8SAl Viro 		childregs->softe = 1;
96406d67d54SPaul Mackerras #endif
96558254e10SAl Viro 		childregs->gpr[15] = arg;
96614cf11afSPaul Mackerras 		p->thread.regs = NULL;	/* no user register state */
967138d1ce8SAl Viro 		ti->flags |= _TIF_RESTOREALL;
96858254e10SAl Viro 		f = ret_from_kernel_thread;
96914cf11afSPaul Mackerras 	} else {
970afa86fc4SAl Viro 		struct pt_regs *regs = current_pt_regs();
97158254e10SAl Viro 		CHECK_FULL_REGS(regs);
97258254e10SAl Viro 		*childregs = *regs;
973ea516b11SAl Viro 		if (usp)
97414cf11afSPaul Mackerras 			childregs->gpr[1] = usp;
97514cf11afSPaul Mackerras 		p->thread.regs = childregs;
97658254e10SAl Viro 		childregs->gpr[3] = 0;  /* Result from fork() */
97706d67d54SPaul Mackerras 		if (clone_flags & CLONE_SETTLS) {
97806d67d54SPaul Mackerras #ifdef CONFIG_PPC64
9799904b005SDenis Kirjanov 			if (!is_32bit_task())
98006d67d54SPaul Mackerras 				childregs->gpr[13] = childregs->gpr[6];
98106d67d54SPaul Mackerras 			else
98206d67d54SPaul Mackerras #endif
98314cf11afSPaul Mackerras 				childregs->gpr[2] = childregs->gpr[6];
98414cf11afSPaul Mackerras 		}
98558254e10SAl Viro 
98658254e10SAl Viro 		f = ret_from_fork;
98706d67d54SPaul Mackerras 	}
98814cf11afSPaul Mackerras 	sp -= STACK_FRAME_OVERHEAD;
98914cf11afSPaul Mackerras 
99014cf11afSPaul Mackerras 	/*
99114cf11afSPaul Mackerras 	 * The way this works is that at some point in the future
99214cf11afSPaul Mackerras 	 * some task will call _switch to switch to the new task.
99314cf11afSPaul Mackerras 	 * That will pop off the stack frame created below and start
99414cf11afSPaul Mackerras 	 * the new task running at ret_from_fork.  The new task will
99514cf11afSPaul Mackerras 	 * do some house keeping and then return from the fork or clone
99614cf11afSPaul Mackerras 	 * system call, using the stack frame created above.
99714cf11afSPaul Mackerras 	 */
998af945cf4SLi Zhong 	((unsigned long *)sp)[0] = 0;
99914cf11afSPaul Mackerras 	sp -= sizeof(struct pt_regs);
100014cf11afSPaul Mackerras 	kregs = (struct pt_regs *) sp;
100114cf11afSPaul Mackerras 	sp -= STACK_FRAME_OVERHEAD;
100214cf11afSPaul Mackerras 	p->thread.ksp = sp;
1003cbc9565eSBenjamin Herrenschmidt #ifdef CONFIG_PPC32
100485218827SKumar Gala 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
100585218827SKumar Gala 				_ALIGN_UP(sizeof(struct thread_info), 16);
1006cbc9565eSBenjamin Herrenschmidt #endif
100728d170abSOleg Nesterov #ifdef CONFIG_HAVE_HW_BREAKPOINT
100828d170abSOleg Nesterov 	p->thread.ptrace_bps[0] = NULL;
100928d170abSOleg Nesterov #endif
101028d170abSOleg Nesterov 
1011*18461960SPaul Mackerras 	p->thread.fp_save_area = NULL;
1012*18461960SPaul Mackerras #ifdef CONFIG_ALTIVEC
1013*18461960SPaul Mackerras 	p->thread.vr_save_area = NULL;
1014*18461960SPaul Mackerras #endif
1015*18461960SPaul Mackerras 
101694491685SBenjamin Herrenschmidt #ifdef CONFIG_PPC_STD_MMU_64
101744ae3ab3SMatt Evans 	if (mmu_has_feature(MMU_FTR_SLB)) {
10181189be65SPaul Mackerras 		unsigned long sp_vsid;
10193c726f8dSBenjamin Herrenschmidt 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
102006d67d54SPaul Mackerras 
102144ae3ab3SMatt Evans 		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
10221189be65SPaul Mackerras 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
10231189be65SPaul Mackerras 				<< SLB_VSID_SHIFT_1T;
10241189be65SPaul Mackerras 		else
10251189be65SPaul Mackerras 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
10261189be65SPaul Mackerras 				<< SLB_VSID_SHIFT;
10273c726f8dSBenjamin Herrenschmidt 		sp_vsid |= SLB_VSID_KERNEL | llp;
102806d67d54SPaul Mackerras 		p->thread.ksp_vsid = sp_vsid;
102906d67d54SPaul Mackerras 	}
1030747bea91SBenjamin Herrenschmidt #endif /* CONFIG_PPC_STD_MMU_64 */
1031efcac658SAlexey Kardashevskiy #ifdef CONFIG_PPC64
1032efcac658SAlexey Kardashevskiy 	if (cpu_has_feature(CPU_FTR_DSCR)) {
10331021cb26SAnton Blanchard 		p->thread.dscr_inherit = current->thread.dscr_inherit;
1034efcac658SAlexey Kardashevskiy 		p->thread.dscr = current->thread.dscr;
1035efcac658SAlexey Kardashevskiy 	}
103692779245SHaren Myneni 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
103792779245SHaren Myneni 		p->thread.ppr = INIT_PPR;
1038efcac658SAlexey Kardashevskiy #endif
103906d67d54SPaul Mackerras 	/*
104006d67d54SPaul Mackerras 	 * The PPC64 ABI makes use of a TOC to contain function
104106d67d54SPaul Mackerras 	 * pointers.  The function (ret_from_except) is actually a pointer
104206d67d54SPaul Mackerras 	 * to the TOC entry.  The first entry is a pointer to the actual
104306d67d54SPaul Mackerras 	 * function.
104406d67d54SPaul Mackerras 	 */
1045747bea91SBenjamin Herrenschmidt #ifdef CONFIG_PPC64
104658254e10SAl Viro 	kregs->nip = *((unsigned long *)f);
104706d67d54SPaul Mackerras #else
104858254e10SAl Viro 	kregs->nip = (unsigned long)f;
104906d67d54SPaul Mackerras #endif
105014cf11afSPaul Mackerras 	return 0;
105114cf11afSPaul Mackerras }
105214cf11afSPaul Mackerras 
105314cf11afSPaul Mackerras /*
105414cf11afSPaul Mackerras  * Set up a thread for executing a new program
105514cf11afSPaul Mackerras  */
105606d67d54SPaul Mackerras void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
105714cf11afSPaul Mackerras {
105890eac727SMichael Ellerman #ifdef CONFIG_PPC64
105990eac727SMichael Ellerman 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
106090eac727SMichael Ellerman #endif
106190eac727SMichael Ellerman 
106206d67d54SPaul Mackerras 	/*
106306d67d54SPaul Mackerras 	 * If we exec out of a kernel thread then thread.regs will not be
106406d67d54SPaul Mackerras 	 * set.  Do it now.
106506d67d54SPaul Mackerras 	 */
106606d67d54SPaul Mackerras 	if (!current->thread.regs) {
10670cec6fd1SAl Viro 		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
10680cec6fd1SAl Viro 		current->thread.regs = regs - 1;
106906d67d54SPaul Mackerras 	}
107006d67d54SPaul Mackerras 
107114cf11afSPaul Mackerras 	memset(regs->gpr, 0, sizeof(regs->gpr));
107214cf11afSPaul Mackerras 	regs->ctr = 0;
107314cf11afSPaul Mackerras 	regs->link = 0;
107414cf11afSPaul Mackerras 	regs->xer = 0;
107514cf11afSPaul Mackerras 	regs->ccr = 0;
107614cf11afSPaul Mackerras 	regs->gpr[1] = sp;
107706d67d54SPaul Mackerras 
1078474f8196SRoland McGrath 	/*
1079474f8196SRoland McGrath 	 * We have just cleared all the nonvolatile GPRs, so make
1080474f8196SRoland McGrath 	 * FULL_REGS(regs) return true.  This is necessary to allow
1081474f8196SRoland McGrath 	 * ptrace to examine the thread immediately after exec.
1082474f8196SRoland McGrath 	 */
1083474f8196SRoland McGrath 	regs->trap &= ~1UL;
1084474f8196SRoland McGrath 
108506d67d54SPaul Mackerras #ifdef CONFIG_PPC32
108606d67d54SPaul Mackerras 	regs->mq = 0;
108706d67d54SPaul Mackerras 	regs->nip = start;
108814cf11afSPaul Mackerras 	regs->msr = MSR_USER;
108906d67d54SPaul Mackerras #else
10909904b005SDenis Kirjanov 	if (!is_32bit_task()) {
109190eac727SMichael Ellerman 		unsigned long entry, toc;
109206d67d54SPaul Mackerras 
109306d67d54SPaul Mackerras 		/* start is a relocated pointer to the function descriptor for
109406d67d54SPaul Mackerras 		 * the elf _start routine.  The first entry in the function
109506d67d54SPaul Mackerras 		 * descriptor is the entry address of _start and the second
109606d67d54SPaul Mackerras 		 * entry is the TOC value we need to use.
109706d67d54SPaul Mackerras 		 */
109806d67d54SPaul Mackerras 		__get_user(entry, (unsigned long __user *)start);
109906d67d54SPaul Mackerras 		__get_user(toc, (unsigned long __user *)start+1);
110006d67d54SPaul Mackerras 
110106d67d54SPaul Mackerras 		/* Check whether the e_entry function descriptor entries
110206d67d54SPaul Mackerras 		 * need to be relocated before we can use them.
110306d67d54SPaul Mackerras 		 */
110406d67d54SPaul Mackerras 		if (load_addr != 0) {
110506d67d54SPaul Mackerras 			entry += load_addr;
110606d67d54SPaul Mackerras 			toc   += load_addr;
110706d67d54SPaul Mackerras 		}
110806d67d54SPaul Mackerras 		regs->nip = entry;
110906d67d54SPaul Mackerras 		regs->gpr[2] = toc;
111006d67d54SPaul Mackerras 		regs->msr = MSR_USER64;
1111d4bf9a78SStephen Rothwell 	} else {
1112d4bf9a78SStephen Rothwell 		regs->nip = start;
1113d4bf9a78SStephen Rothwell 		regs->gpr[2] = 0;
1114d4bf9a78SStephen Rothwell 		regs->msr = MSR_USER32;
111506d67d54SPaul Mackerras 	}
111606d67d54SPaul Mackerras #endif
111748abec07SPaul Mackerras 	discard_lazy_cpu_state();
1118ce48b210SMichael Neuling #ifdef CONFIG_VSX
1119ce48b210SMichael Neuling 	current->thread.used_vsr = 0;
1120ce48b210SMichael Neuling #endif
1121de79f7b9SPaul Mackerras 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1122*18461960SPaul Mackerras 	current->thread.fp_save_area = NULL;
112314cf11afSPaul Mackerras #ifdef CONFIG_ALTIVEC
1124de79f7b9SPaul Mackerras 	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1125de79f7b9SPaul Mackerras 	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1126*18461960SPaul Mackerras 	current->thread.vr_save_area = NULL;
112714cf11afSPaul Mackerras 	current->thread.vrsave = 0;
112814cf11afSPaul Mackerras 	current->thread.used_vr = 0;
112914cf11afSPaul Mackerras #endif /* CONFIG_ALTIVEC */
113014cf11afSPaul Mackerras #ifdef CONFIG_SPE
113114cf11afSPaul Mackerras 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
113214cf11afSPaul Mackerras 	current->thread.acc = 0;
113314cf11afSPaul Mackerras 	current->thread.spefscr = 0;
113414cf11afSPaul Mackerras 	current->thread.used_spe = 0;
113514cf11afSPaul Mackerras #endif /* CONFIG_SPE */
1136bc2a9408SMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1137bc2a9408SMichael Neuling 	if (cpu_has_feature(CPU_FTR_TM))
1138bc2a9408SMichael Neuling 		regs->msr |= MSR_TM;
1139bc2a9408SMichael Neuling 	current->thread.tm_tfhar = 0;
1140bc2a9408SMichael Neuling 	current->thread.tm_texasr = 0;
1141bc2a9408SMichael Neuling 	current->thread.tm_tfiar = 0;
1142bc2a9408SMichael Neuling #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
114314cf11afSPaul Mackerras }
114414cf11afSPaul Mackerras 
114514cf11afSPaul Mackerras #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
114614cf11afSPaul Mackerras 		| PR_FP_EXC_RES | PR_FP_EXC_INV)
114714cf11afSPaul Mackerras 
114814cf11afSPaul Mackerras int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
114914cf11afSPaul Mackerras {
115014cf11afSPaul Mackerras 	struct pt_regs *regs = tsk->thread.regs;
115114cf11afSPaul Mackerras 
115214cf11afSPaul Mackerras 	/* This is a bit hairy.  If we are an SPE enabled  processor
115314cf11afSPaul Mackerras 	 * (have embedded fp) we store the IEEE exception enable flags in
115414cf11afSPaul Mackerras 	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
115514cf11afSPaul Mackerras 	 * mode (asyn, precise, disabled) for 'Classic' FP. */
115614cf11afSPaul Mackerras 	if (val & PR_FP_EXC_SW_ENABLE) {
115714cf11afSPaul Mackerras #ifdef CONFIG_SPE
11585e14d21eSKumar Gala 		if (cpu_has_feature(CPU_FTR_SPE)) {
115914cf11afSPaul Mackerras 			tsk->thread.fpexc_mode = val &
116014cf11afSPaul Mackerras 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
116106d67d54SPaul Mackerras 			return 0;
11625e14d21eSKumar Gala 		} else {
11635e14d21eSKumar Gala 			return -EINVAL;
11645e14d21eSKumar Gala 		}
116514cf11afSPaul Mackerras #else
116614cf11afSPaul Mackerras 		return -EINVAL;
116714cf11afSPaul Mackerras #endif
116806d67d54SPaul Mackerras 	}
116906d67d54SPaul Mackerras 
117014cf11afSPaul Mackerras 	/* on a CONFIG_SPE this does not hurt us.  The bits that
117114cf11afSPaul Mackerras 	 * __pack_fe01 use do not overlap with bits used for
117214cf11afSPaul Mackerras 	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
117314cf11afSPaul Mackerras 	 * on CONFIG_SPE implementations are reserved so writing to
117414cf11afSPaul Mackerras 	 * them does not change anything */
117514cf11afSPaul Mackerras 	if (val > PR_FP_EXC_PRECISE)
117614cf11afSPaul Mackerras 		return -EINVAL;
117714cf11afSPaul Mackerras 	tsk->thread.fpexc_mode = __pack_fe01(val);
117814cf11afSPaul Mackerras 	if (regs != NULL && (regs->msr & MSR_FP) != 0)
117914cf11afSPaul Mackerras 		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
118014cf11afSPaul Mackerras 			| tsk->thread.fpexc_mode;
118114cf11afSPaul Mackerras 	return 0;
118214cf11afSPaul Mackerras }
118314cf11afSPaul Mackerras 
118414cf11afSPaul Mackerras int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
118514cf11afSPaul Mackerras {
118614cf11afSPaul Mackerras 	unsigned int val;
118714cf11afSPaul Mackerras 
118814cf11afSPaul Mackerras 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
118914cf11afSPaul Mackerras #ifdef CONFIG_SPE
11905e14d21eSKumar Gala 		if (cpu_has_feature(CPU_FTR_SPE))
119114cf11afSPaul Mackerras 			val = tsk->thread.fpexc_mode;
11925e14d21eSKumar Gala 		else
11935e14d21eSKumar Gala 			return -EINVAL;
119414cf11afSPaul Mackerras #else
119514cf11afSPaul Mackerras 		return -EINVAL;
119614cf11afSPaul Mackerras #endif
119714cf11afSPaul Mackerras 	else
119814cf11afSPaul Mackerras 		val = __unpack_fe01(tsk->thread.fpexc_mode);
119914cf11afSPaul Mackerras 	return put_user(val, (unsigned int __user *) adr);
120014cf11afSPaul Mackerras }
120114cf11afSPaul Mackerras 
1202fab5db97SPaul Mackerras int set_endian(struct task_struct *tsk, unsigned int val)
1203fab5db97SPaul Mackerras {
1204fab5db97SPaul Mackerras 	struct pt_regs *regs = tsk->thread.regs;
1205fab5db97SPaul Mackerras 
1206fab5db97SPaul Mackerras 	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1207fab5db97SPaul Mackerras 	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1208fab5db97SPaul Mackerras 		return -EINVAL;
1209fab5db97SPaul Mackerras 
1210fab5db97SPaul Mackerras 	if (regs == NULL)
1211fab5db97SPaul Mackerras 		return -EINVAL;
1212fab5db97SPaul Mackerras 
1213fab5db97SPaul Mackerras 	if (val == PR_ENDIAN_BIG)
1214fab5db97SPaul Mackerras 		regs->msr &= ~MSR_LE;
1215fab5db97SPaul Mackerras 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1216fab5db97SPaul Mackerras 		regs->msr |= MSR_LE;
1217fab5db97SPaul Mackerras 	else
1218fab5db97SPaul Mackerras 		return -EINVAL;
1219fab5db97SPaul Mackerras 
1220fab5db97SPaul Mackerras 	return 0;
1221fab5db97SPaul Mackerras }
1222fab5db97SPaul Mackerras 
1223fab5db97SPaul Mackerras int get_endian(struct task_struct *tsk, unsigned long adr)
1224fab5db97SPaul Mackerras {
1225fab5db97SPaul Mackerras 	struct pt_regs *regs = tsk->thread.regs;
1226fab5db97SPaul Mackerras 	unsigned int val;
1227fab5db97SPaul Mackerras 
1228fab5db97SPaul Mackerras 	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1229fab5db97SPaul Mackerras 	    !cpu_has_feature(CPU_FTR_REAL_LE))
1230fab5db97SPaul Mackerras 		return -EINVAL;
1231fab5db97SPaul Mackerras 
1232fab5db97SPaul Mackerras 	if (regs == NULL)
1233fab5db97SPaul Mackerras 		return -EINVAL;
1234fab5db97SPaul Mackerras 
1235fab5db97SPaul Mackerras 	if (regs->msr & MSR_LE) {
1236fab5db97SPaul Mackerras 		if (cpu_has_feature(CPU_FTR_REAL_LE))
1237fab5db97SPaul Mackerras 			val = PR_ENDIAN_LITTLE;
1238fab5db97SPaul Mackerras 		else
1239fab5db97SPaul Mackerras 			val = PR_ENDIAN_PPC_LITTLE;
1240fab5db97SPaul Mackerras 	} else
1241fab5db97SPaul Mackerras 		val = PR_ENDIAN_BIG;
1242fab5db97SPaul Mackerras 
1243fab5db97SPaul Mackerras 	return put_user(val, (unsigned int __user *)adr);
1244fab5db97SPaul Mackerras }
1245fab5db97SPaul Mackerras 
1246e9370ae1SPaul Mackerras int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1247e9370ae1SPaul Mackerras {
1248e9370ae1SPaul Mackerras 	tsk->thread.align_ctl = val;
1249e9370ae1SPaul Mackerras 	return 0;
1250e9370ae1SPaul Mackerras }
1251e9370ae1SPaul Mackerras 
1252e9370ae1SPaul Mackerras int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1253e9370ae1SPaul Mackerras {
1254e9370ae1SPaul Mackerras 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1255e9370ae1SPaul Mackerras }
1256e9370ae1SPaul Mackerras 
1257bb72c481SPaul Mackerras static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1258bb72c481SPaul Mackerras 				  unsigned long nbytes)
1259bb72c481SPaul Mackerras {
1260bb72c481SPaul Mackerras 	unsigned long stack_page;
1261bb72c481SPaul Mackerras 	unsigned long cpu = task_cpu(p);
1262bb72c481SPaul Mackerras 
1263bb72c481SPaul Mackerras 	/*
1264bb72c481SPaul Mackerras 	 * Avoid crashing if the stack has overflowed and corrupted
1265bb72c481SPaul Mackerras 	 * task_cpu(p), which is in the thread_info struct.
1266bb72c481SPaul Mackerras 	 */
1267bb72c481SPaul Mackerras 	if (cpu < NR_CPUS && cpu_possible(cpu)) {
1268bb72c481SPaul Mackerras 		stack_page = (unsigned long) hardirq_ctx[cpu];
1269bb72c481SPaul Mackerras 		if (sp >= stack_page + sizeof(struct thread_struct)
1270bb72c481SPaul Mackerras 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1271bb72c481SPaul Mackerras 			return 1;
1272bb72c481SPaul Mackerras 
1273bb72c481SPaul Mackerras 		stack_page = (unsigned long) softirq_ctx[cpu];
1274bb72c481SPaul Mackerras 		if (sp >= stack_page + sizeof(struct thread_struct)
1275bb72c481SPaul Mackerras 		    && sp <= stack_page + THREAD_SIZE - nbytes)
1276bb72c481SPaul Mackerras 			return 1;
1277bb72c481SPaul Mackerras 	}
1278bb72c481SPaul Mackerras 	return 0;
1279bb72c481SPaul Mackerras }
1280bb72c481SPaul Mackerras 
12812f25194dSAnton Blanchard int validate_sp(unsigned long sp, struct task_struct *p,
128214cf11afSPaul Mackerras 		       unsigned long nbytes)
128314cf11afSPaul Mackerras {
12840cec6fd1SAl Viro 	unsigned long stack_page = (unsigned long)task_stack_page(p);
128514cf11afSPaul Mackerras 
128614cf11afSPaul Mackerras 	if (sp >= stack_page + sizeof(struct thread_struct)
128714cf11afSPaul Mackerras 	    && sp <= stack_page + THREAD_SIZE - nbytes)
128814cf11afSPaul Mackerras 		return 1;
128914cf11afSPaul Mackerras 
1290bb72c481SPaul Mackerras 	return valid_irq_stack(sp, p, nbytes);
129114cf11afSPaul Mackerras }
129214cf11afSPaul Mackerras 
12932f25194dSAnton Blanchard EXPORT_SYMBOL(validate_sp);
12942f25194dSAnton Blanchard 
129506d67d54SPaul Mackerras unsigned long get_wchan(struct task_struct *p)
129606d67d54SPaul Mackerras {
129706d67d54SPaul Mackerras 	unsigned long ip, sp;
129806d67d54SPaul Mackerras 	int count = 0;
129906d67d54SPaul Mackerras 
130006d67d54SPaul Mackerras 	if (!p || p == current || p->state == TASK_RUNNING)
130106d67d54SPaul Mackerras 		return 0;
130206d67d54SPaul Mackerras 
130306d67d54SPaul Mackerras 	sp = p->thread.ksp;
1304ec2b36b9SBenjamin Herrenschmidt 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
130506d67d54SPaul Mackerras 		return 0;
130606d67d54SPaul Mackerras 
130706d67d54SPaul Mackerras 	do {
130806d67d54SPaul Mackerras 		sp = *(unsigned long *)sp;
1309ec2b36b9SBenjamin Herrenschmidt 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
131006d67d54SPaul Mackerras 			return 0;
131106d67d54SPaul Mackerras 		if (count > 0) {
1312ec2b36b9SBenjamin Herrenschmidt 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
131306d67d54SPaul Mackerras 			if (!in_sched_functions(ip))
131406d67d54SPaul Mackerras 				return ip;
131506d67d54SPaul Mackerras 		}
131606d67d54SPaul Mackerras 	} while (count++ < 16);
131706d67d54SPaul Mackerras 	return 0;
131806d67d54SPaul Mackerras }
131906d67d54SPaul Mackerras 
1320c4d04be1SJohannes Berg static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
132114cf11afSPaul Mackerras 
132214cf11afSPaul Mackerras void show_stack(struct task_struct *tsk, unsigned long *stack)
132314cf11afSPaul Mackerras {
132406d67d54SPaul Mackerras 	unsigned long sp, ip, lr, newsp;
132514cf11afSPaul Mackerras 	int count = 0;
132606d67d54SPaul Mackerras 	int firstframe = 1;
13276794c782SSteven Rostedt #ifdef CONFIG_FUNCTION_GRAPH_TRACER
13286794c782SSteven Rostedt 	int curr_frame = current->curr_ret_stack;
13296794c782SSteven Rostedt 	extern void return_to_handler(void);
13309135c3ccSSteven Rostedt 	unsigned long rth = (unsigned long)return_to_handler;
13319135c3ccSSteven Rostedt 	unsigned long mrth = -1;
13326794c782SSteven Rostedt #ifdef CONFIG_PPC64
13339135c3ccSSteven Rostedt 	extern void mod_return_to_handler(void);
13349135c3ccSSteven Rostedt 	rth = *(unsigned long *)rth;
13359135c3ccSSteven Rostedt 	mrth = (unsigned long)mod_return_to_handler;
13369135c3ccSSteven Rostedt 	mrth = *(unsigned long *)mrth;
13376794c782SSteven Rostedt #endif
13386794c782SSteven Rostedt #endif
133914cf11afSPaul Mackerras 
134014cf11afSPaul Mackerras 	sp = (unsigned long) stack;
134114cf11afSPaul Mackerras 	if (tsk == NULL)
134214cf11afSPaul Mackerras 		tsk = current;
134314cf11afSPaul Mackerras 	if (sp == 0) {
134414cf11afSPaul Mackerras 		if (tsk == current)
134514cf11afSPaul Mackerras 			asm("mr %0,1" : "=r" (sp));
134614cf11afSPaul Mackerras 		else
134714cf11afSPaul Mackerras 			sp = tsk->thread.ksp;
134814cf11afSPaul Mackerras 	}
134914cf11afSPaul Mackerras 
135006d67d54SPaul Mackerras 	lr = 0;
135106d67d54SPaul Mackerras 	printk("Call Trace:\n");
135214cf11afSPaul Mackerras 	do {
1353ec2b36b9SBenjamin Herrenschmidt 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
135406d67d54SPaul Mackerras 			return;
135506d67d54SPaul Mackerras 
135606d67d54SPaul Mackerras 		stack = (unsigned long *) sp;
135706d67d54SPaul Mackerras 		newsp = stack[0];
1358ec2b36b9SBenjamin Herrenschmidt 		ip = stack[STACK_FRAME_LR_SAVE];
135906d67d54SPaul Mackerras 		if (!firstframe || ip != lr) {
1360058c78f4SBenjamin Herrenschmidt 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
13616794c782SSteven Rostedt #ifdef CONFIG_FUNCTION_GRAPH_TRACER
13629135c3ccSSteven Rostedt 			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
13636794c782SSteven Rostedt 				printk(" (%pS)",
13646794c782SSteven Rostedt 				       (void *)current->ret_stack[curr_frame].ret);
13656794c782SSteven Rostedt 				curr_frame--;
13666794c782SSteven Rostedt 			}
13676794c782SSteven Rostedt #endif
136806d67d54SPaul Mackerras 			if (firstframe)
136906d67d54SPaul Mackerras 				printk(" (unreliable)");
137006d67d54SPaul Mackerras 			printk("\n");
137114cf11afSPaul Mackerras 		}
137206d67d54SPaul Mackerras 		firstframe = 0;
137306d67d54SPaul Mackerras 
137406d67d54SPaul Mackerras 		/*
137506d67d54SPaul Mackerras 		 * See if this is an exception frame.
137606d67d54SPaul Mackerras 		 * We look for the "regshere" marker in the current frame.
137706d67d54SPaul Mackerras 		 */
1378ec2b36b9SBenjamin Herrenschmidt 		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1379ec2b36b9SBenjamin Herrenschmidt 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
138006d67d54SPaul Mackerras 			struct pt_regs *regs = (struct pt_regs *)
138106d67d54SPaul Mackerras 				(sp + STACK_FRAME_OVERHEAD);
138206d67d54SPaul Mackerras 			lr = regs->link;
1383058c78f4SBenjamin Herrenschmidt 			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
1384058c78f4SBenjamin Herrenschmidt 			       regs->trap, (void *)regs->nip, (void *)lr);
138506d67d54SPaul Mackerras 			firstframe = 1;
138614cf11afSPaul Mackerras 		}
138706d67d54SPaul Mackerras 
138806d67d54SPaul Mackerras 		sp = newsp;
138906d67d54SPaul Mackerras 	} while (count++ < kstack_depth_to_print);
139006d67d54SPaul Mackerras }
139106d67d54SPaul Mackerras 
1392cb2c9b27SAnton Blanchard #ifdef CONFIG_PPC64
1393fe1952fcSBenjamin Herrenschmidt /* Called with hard IRQs off */
13940e37739bSMichael Ellerman void notrace __ppc64_runlatch_on(void)
1395cb2c9b27SAnton Blanchard {
1396fe1952fcSBenjamin Herrenschmidt 	struct thread_info *ti = current_thread_info();
1397cb2c9b27SAnton Blanchard 	unsigned long ctrl;
1398cb2c9b27SAnton Blanchard 
1399cb2c9b27SAnton Blanchard 	ctrl = mfspr(SPRN_CTRLF);
1400cb2c9b27SAnton Blanchard 	ctrl |= CTRL_RUNLATCH;
1401cb2c9b27SAnton Blanchard 	mtspr(SPRN_CTRLT, ctrl);
1402cb2c9b27SAnton Blanchard 
1403fae2e0fbSBenjamin Herrenschmidt 	ti->local_flags |= _TLF_RUNLATCH;
1404cb2c9b27SAnton Blanchard }
1405cb2c9b27SAnton Blanchard 
1406fe1952fcSBenjamin Herrenschmidt /* Called with hard IRQs off */
14070e37739bSMichael Ellerman void notrace __ppc64_runlatch_off(void)
1408cb2c9b27SAnton Blanchard {
1409fe1952fcSBenjamin Herrenschmidt 	struct thread_info *ti = current_thread_info();
1410cb2c9b27SAnton Blanchard 	unsigned long ctrl;
1411cb2c9b27SAnton Blanchard 
1412fae2e0fbSBenjamin Herrenschmidt 	ti->local_flags &= ~_TLF_RUNLATCH;
1413cb2c9b27SAnton Blanchard 
1414cb2c9b27SAnton Blanchard 	ctrl = mfspr(SPRN_CTRLF);
1415cb2c9b27SAnton Blanchard 	ctrl &= ~CTRL_RUNLATCH;
1416cb2c9b27SAnton Blanchard 	mtspr(SPRN_CTRLT, ctrl);
1417cb2c9b27SAnton Blanchard }
1418fe1952fcSBenjamin Herrenschmidt #endif /* CONFIG_PPC64 */
1419f6a61680SBenjamin Herrenschmidt 
1420d839088cSAnton Blanchard unsigned long arch_align_stack(unsigned long sp)
1421d839088cSAnton Blanchard {
1422d839088cSAnton Blanchard 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1423d839088cSAnton Blanchard 		sp -= get_random_int() & ~PAGE_MASK;
1424d839088cSAnton Blanchard 	return sp & ~0xf;
1425d839088cSAnton Blanchard }
1426912f9ee2SAnton Blanchard 
1427912f9ee2SAnton Blanchard static inline unsigned long brk_rnd(void)
1428912f9ee2SAnton Blanchard {
1429912f9ee2SAnton Blanchard         unsigned long rnd = 0;
1430912f9ee2SAnton Blanchard 
1431912f9ee2SAnton Blanchard 	/* 8MB for 32bit, 1GB for 64bit */
1432912f9ee2SAnton Blanchard 	if (is_32bit_task())
1433912f9ee2SAnton Blanchard 		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
1434912f9ee2SAnton Blanchard 	else
1435912f9ee2SAnton Blanchard 		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
1436912f9ee2SAnton Blanchard 
1437912f9ee2SAnton Blanchard 	return rnd << PAGE_SHIFT;
1438912f9ee2SAnton Blanchard }
1439912f9ee2SAnton Blanchard 
1440912f9ee2SAnton Blanchard unsigned long arch_randomize_brk(struct mm_struct *mm)
1441912f9ee2SAnton Blanchard {
14428bbde7a7SAnton Blanchard 	unsigned long base = mm->brk;
14438bbde7a7SAnton Blanchard 	unsigned long ret;
14448bbde7a7SAnton Blanchard 
1445ce7a35c7SKumar Gala #ifdef CONFIG_PPC_STD_MMU_64
14468bbde7a7SAnton Blanchard 	/*
14478bbde7a7SAnton Blanchard 	 * If we are using 1TB segments and we are allowed to randomise
14488bbde7a7SAnton Blanchard 	 * the heap, we can put it above 1TB so it is backed by a 1TB
14498bbde7a7SAnton Blanchard 	 * segment. Otherwise the heap will be in the bottom 1TB
14508bbde7a7SAnton Blanchard 	 * which always uses 256MB segments and this may result in a
14518bbde7a7SAnton Blanchard 	 * performance penalty.
14528bbde7a7SAnton Blanchard 	 */
14538bbde7a7SAnton Blanchard 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
14548bbde7a7SAnton Blanchard 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
14558bbde7a7SAnton Blanchard #endif
14568bbde7a7SAnton Blanchard 
14578bbde7a7SAnton Blanchard 	ret = PAGE_ALIGN(base + brk_rnd());
1458912f9ee2SAnton Blanchard 
1459912f9ee2SAnton Blanchard 	if (ret < mm->brk)
1460912f9ee2SAnton Blanchard 		return mm->brk;
1461912f9ee2SAnton Blanchard 
1462912f9ee2SAnton Blanchard 	return ret;
1463912f9ee2SAnton Blanchard }
1464501cb16dSAnton Blanchard 
1465501cb16dSAnton Blanchard unsigned long randomize_et_dyn(unsigned long base)
1466501cb16dSAnton Blanchard {
1467501cb16dSAnton Blanchard 	unsigned long ret = PAGE_ALIGN(base + brk_rnd());
1468501cb16dSAnton Blanchard 
1469501cb16dSAnton Blanchard 	if (ret < base)
1470501cb16dSAnton Blanchard 		return base;
1471501cb16dSAnton Blanchard 
1472501cb16dSAnton Blanchard 	return ret;
1473501cb16dSAnton Blanchard }
1474