xref: /linux/arch/powerpc/kernel/process.c (revision 48a8ab4eeb8271f2a0e2ca3cf80844a59acca153)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Derived from "arch/i386/kernel/process.c"
4  *    Copyright (C) 1995  Linus Torvalds
5  *
6  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
7  *  Paul Mackerras (paulus@cs.anu.edu.au)
8  *
9  *  PowerPC version
10  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11  */
12 
13 #include <linux/errno.h>
14 #include <linux/sched.h>
15 #include <linux/sched/debug.h>
16 #include <linux/sched/task.h>
17 #include <linux/sched/task_stack.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/smp.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/elf.h>
27 #include <linux/prctl.h>
28 #include <linux/init_task.h>
29 #include <linux/export.h>
30 #include <linux/kallsyms.h>
31 #include <linux/mqueue.h>
32 #include <linux/hardirq.h>
33 #include <linux/utsname.h>
34 #include <linux/ftrace.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/personality.h>
37 #include <linux/random.h>
38 #include <linux/hw_breakpoint.h>
39 #include <linux/uaccess.h>
40 #include <linux/elf-randomize.h>
41 #include <linux/pkeys.h>
42 #include <linux/seq_buf.h>
43 
44 #include <asm/io.h>
45 #include <asm/processor.h>
46 #include <asm/mmu.h>
47 #include <asm/prom.h>
48 #include <asm/machdep.h>
49 #include <asm/time.h>
50 #include <asm/runlatch.h>
51 #include <asm/syscalls.h>
52 #include <asm/switch_to.h>
53 #include <asm/tm.h>
54 #include <asm/debug.h>
55 #ifdef CONFIG_PPC64
56 #include <asm/firmware.h>
57 #include <asm/hw_irq.h>
58 #endif
59 #include <asm/code-patching.h>
60 #include <asm/exec.h>
61 #include <asm/livepatch.h>
62 #include <asm/cpu_has_feature.h>
63 #include <asm/asm-prototypes.h>
64 #include <asm/stacktrace.h>
65 #include <asm/hw_breakpoint.h>
66 
67 #include <linux/kprobes.h>
68 #include <linux/kdebug.h>
69 
70 /* Transactional Memory debug */
71 #ifdef TM_DEBUG_SW
72 #define TM_DEBUG(x...) printk(KERN_INFO x)
73 #else
74 #define TM_DEBUG(x...) do { } while (0)
75 #endif
76 
77 extern unsigned long _get_SP(void);
78 
79 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
80 /*
81  * Are we running in "Suspend disabled" mode? If so, we have to block any
82  * sigreturn that would get us into suspended state, and we also warn in some
83  * other paths that we should never reach with suspend disabled.
84  */
85 bool tm_suspend_disabled __ro_after_init = false;
86 
87 static void check_if_tm_restore_required(struct task_struct *tsk)
88 {
89 	/*
90 	 * If we are saving the current thread's registers, and the
91 	 * thread is in a transactional state, set the TIF_RESTORE_TM
92 	 * bit so that we know to restore the registers before
93 	 * returning to userspace.
94 	 */
95 	if (tsk == current && tsk->thread.regs &&
96 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
97 	    !test_thread_flag(TIF_RESTORE_TM)) {
98 		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
99 		set_thread_flag(TIF_RESTORE_TM);
100 	}
101 }
102 
103 #else
104 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
105 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
106 
107 bool strict_msr_control;
108 EXPORT_SYMBOL(strict_msr_control);
109 
110 static int __init enable_strict_msr_control(char *str)
111 {
112 	strict_msr_control = true;
113 	pr_info("Enabling strict facility control\n");
114 
115 	return 0;
116 }
117 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
118 
119 /* notrace because it's called by restore_math */
120 unsigned long notrace msr_check_and_set(unsigned long bits)
121 {
122 	unsigned long oldmsr = mfmsr();
123 	unsigned long newmsr;
124 
125 	newmsr = oldmsr | bits;
126 
127 	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
128 		newmsr |= MSR_VSX;
129 
130 	if (oldmsr != newmsr)
131 		mtmsr_isync(newmsr);
132 
133 	return newmsr;
134 }
135 EXPORT_SYMBOL_GPL(msr_check_and_set);
136 
137 /* notrace because it's called by restore_math */
138 void notrace __msr_check_and_clear(unsigned long bits)
139 {
140 	unsigned long oldmsr = mfmsr();
141 	unsigned long newmsr;
142 
143 	newmsr = oldmsr & ~bits;
144 
145 	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
146 		newmsr &= ~MSR_VSX;
147 
148 	if (oldmsr != newmsr)
149 		mtmsr_isync(newmsr);
150 }
151 EXPORT_SYMBOL(__msr_check_and_clear);
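
/*
 * Usage sketch (added annotation, not part of the original source): kernel
 * code that needs a facility live brackets its use with these helpers,
 * exactly as giveup_fpu() does below:
 *
 *	msr_check_and_set(MSR_FP);
 *	...touch the FP registers...
 *	msr_check_and_clear(MSR_FP);
 *
 * Note the VSX coupling: on a VSX-capable CPU, setting or clearing MSR_FP
 * here also sets or clears MSR_VSX.
 */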
152 
153 #ifdef CONFIG_PPC_FPU
154 static void __giveup_fpu(struct task_struct *tsk)
155 {
156 	unsigned long msr;
157 
158 	save_fpu(tsk);
159 	msr = tsk->thread.regs->msr;
160 	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
161 	if (cpu_has_feature(CPU_FTR_VSX))
162 		msr &= ~MSR_VSX;
163 	tsk->thread.regs->msr = msr;
164 }
165 
166 void giveup_fpu(struct task_struct *tsk)
167 {
168 	check_if_tm_restore_required(tsk);
169 
170 	msr_check_and_set(MSR_FP);
171 	__giveup_fpu(tsk);
172 	msr_check_and_clear(MSR_FP);
173 }
174 EXPORT_SYMBOL(giveup_fpu);
175 
176 /*
177  * Make sure the floating-point register state in
178  * the thread_struct is up to date for task tsk.
179  */
180 void flush_fp_to_thread(struct task_struct *tsk)
181 {
182 	if (tsk->thread.regs) {
183 		/*
184 		 * We need to disable preemption here because if we didn't,
185 		 * another process could get scheduled after the regs->msr
186 		 * test but before we have finished saving the FP registers
187 		 * to the thread_struct.  That process could take over the
188 		 * FPU, and then when we get scheduled again we would store
189 		 * bogus values for the remaining FP registers.
190 		 */
191 		preempt_disable();
192 		if (tsk->thread.regs->msr & MSR_FP) {
193 			/*
194 			 * This should only ever be called for current or
195 			 * for a stopped child process.  Since we save away
196 			 * the FP register state on context switch,
197 			 * there is something wrong if a stopped child appears
198 			 * to still have its FP state in the CPU registers.
199 			 */
200 			BUG_ON(tsk != current);
201 			giveup_fpu(tsk);
202 		}
203 		preempt_enable();
204 	}
205 }
206 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
207 
208 void enable_kernel_fp(void)
209 {
210 	unsigned long cpumsr;
211 
212 	WARN_ON(preemptible());
213 
214 	cpumsr = msr_check_and_set(MSR_FP);
215 
216 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
217 		check_if_tm_restore_required(current);
218 		/*
219 		 * If a thread has already been reclaimed then the
220 		 * checkpointed registers are on the CPU but have definitely
221 		 * been saved by the reclaim code. We don't need to, and *cannot*,
222 		 * giveup as this would save to the 'live' structure, not the
223 		 * checkpointed structure.
224 		 */
225 		if (!MSR_TM_ACTIVE(cpumsr) &&
226 		     MSR_TM_ACTIVE(current->thread.regs->msr))
227 			return;
228 		__giveup_fpu(current);
229 	}
230 }
231 EXPORT_SYMBOL(enable_kernel_fp);
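
/*
 * Illustrative caller pattern (an assumption, not code from this file):
 * enable_kernel_fp() must run with preemption disabled, per the
 * WARN_ON(preemptible()) above, e.g.:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	...FP/VSX instructions...
 *	preempt_enable();
 */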
232 #else
233 static inline void __giveup_fpu(struct task_struct *tsk) { }
234 #endif /* CONFIG_PPC_FPU */
235 
236 #ifdef CONFIG_ALTIVEC
237 static void __giveup_altivec(struct task_struct *tsk)
238 {
239 	unsigned long msr;
240 
241 	save_altivec(tsk);
242 	msr = tsk->thread.regs->msr;
243 	msr &= ~MSR_VEC;
244 	if (cpu_has_feature(CPU_FTR_VSX))
245 		msr &= ~MSR_VSX;
246 	tsk->thread.regs->msr = msr;
247 }
248 
249 void giveup_altivec(struct task_struct *tsk)
250 {
251 	check_if_tm_restore_required(tsk);
252 
253 	msr_check_and_set(MSR_VEC);
254 	__giveup_altivec(tsk);
255 	msr_check_and_clear(MSR_VEC);
256 }
257 EXPORT_SYMBOL(giveup_altivec);
258 
259 void enable_kernel_altivec(void)
260 {
261 	unsigned long cpumsr;
262 
263 	WARN_ON(preemptible());
264 
265 	cpumsr = msr_check_and_set(MSR_VEC);
266 
267 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
268 		check_if_tm_restore_required(current);
269 		/*
270 		 * If a thread has already been reclaimed then the
271 		 * checkpointed registers are on the CPU but have definitely
272 		 * been saved by the reclaim code. We don't need to, and *cannot*,
273 		 * giveup as this would save to the 'live' structure, not the
274 		 * checkpointed structure.
275 		 */
276 		if (!MSR_TM_ACTIVE(cpumsr) &&
277 		     MSR_TM_ACTIVE(current->thread.regs->msr))
278 			return;
279 		__giveup_altivec(current);
280 	}
281 }
282 EXPORT_SYMBOL(enable_kernel_altivec);
283 
284 /*
285  * Make sure the VMX/Altivec register state in
286  * the thread_struct is up to date for task tsk.
287  */
288 void flush_altivec_to_thread(struct task_struct *tsk)
289 {
290 	if (tsk->thread.regs) {
291 		preempt_disable();
292 		if (tsk->thread.regs->msr & MSR_VEC) {
293 			BUG_ON(tsk != current);
294 			giveup_altivec(tsk);
295 		}
296 		preempt_enable();
297 	}
298 }
299 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
300 #endif /* CONFIG_ALTIVEC */
301 
302 #ifdef CONFIG_VSX
303 static void __giveup_vsx(struct task_struct *tsk)
304 {
305 	unsigned long msr = tsk->thread.regs->msr;
306 
307 	/*
308 	 * We should never be setting MSR_VSX without also setting
309 	 * MSR_FP and MSR_VEC
310 	 */
311 	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
312 
313 	/* __giveup_fpu will clear MSR_VSX */
314 	if (msr & MSR_FP)
315 		__giveup_fpu(tsk);
316 	if (msr & MSR_VEC)
317 		__giveup_altivec(tsk);
318 }
319 
320 static void giveup_vsx(struct task_struct *tsk)
321 {
322 	check_if_tm_restore_required(tsk);
323 
324 	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
325 	__giveup_vsx(tsk);
326 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
327 }
328 
329 void enable_kernel_vsx(void)
330 {
331 	unsigned long cpumsr;
332 
333 	WARN_ON(preemptible());
334 
335 	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
336 
337 	if (current->thread.regs &&
338 	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
339 		check_if_tm_restore_required(current);
340 		/*
341 		 * If a thread has already been reclaimed then the
342 		 * checkpointed registers are on the CPU but have definitely
343 		 * been saved by the reclaim code. We don't need to, and *cannot*,
344 		 * giveup as this would save to the 'live' structure, not the
345 		 * checkpointed structure.
346 		 */
347 		if (!MSR_TM_ACTIVE(cpumsr) &&
348 		     MSR_TM_ACTIVE(current->thread.regs->msr))
349 			return;
350 		__giveup_vsx(current);
351 	}
352 }
353 EXPORT_SYMBOL(enable_kernel_vsx);
354 
355 void flush_vsx_to_thread(struct task_struct *tsk)
356 {
357 	if (tsk->thread.regs) {
358 		preempt_disable();
359 		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
360 			BUG_ON(tsk != current);
361 			giveup_vsx(tsk);
362 		}
363 		preempt_enable();
364 	}
365 }
366 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
367 #endif /* CONFIG_VSX */
368 
369 #ifdef CONFIG_SPE
370 void giveup_spe(struct task_struct *tsk)
371 {
372 	check_if_tm_restore_required(tsk);
373 
374 	msr_check_and_set(MSR_SPE);
375 	__giveup_spe(tsk);
376 	msr_check_and_clear(MSR_SPE);
377 }
378 EXPORT_SYMBOL(giveup_spe);
379 
380 void enable_kernel_spe(void)
381 {
382 	WARN_ON(preemptible());
383 
384 	msr_check_and_set(MSR_SPE);
385 
386 	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
387 		check_if_tm_restore_required(current);
388 		__giveup_spe(current);
389 	}
390 }
391 EXPORT_SYMBOL(enable_kernel_spe);
392 
393 void flush_spe_to_thread(struct task_struct *tsk)
394 {
395 	if (tsk->thread.regs) {
396 		preempt_disable();
397 		if (tsk->thread.regs->msr & MSR_SPE) {
398 			BUG_ON(tsk != current);
399 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
400 			giveup_spe(tsk);
401 		}
402 		preempt_enable();
403 	}
404 }
405 #endif /* CONFIG_SPE */
406 
407 static unsigned long msr_all_available;
408 
409 static int __init init_msr_all_available(void)
410 {
411 	if (IS_ENABLED(CONFIG_PPC_FPU))
412 		msr_all_available |= MSR_FP;
413 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
414 		msr_all_available |= MSR_VEC;
415 	if (cpu_has_feature(CPU_FTR_VSX))
416 		msr_all_available |= MSR_VSX;
417 	if (cpu_has_feature(CPU_FTR_SPE))
418 		msr_all_available |= MSR_SPE;
419 
420 	return 0;
421 }
422 early_initcall(init_msr_all_available);
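
/*
 * Added note: msr_all_available is the mask of facility bits this CPU can
 * have live in user state; giveup_all() and save_all() below test a
 * thread's user MSR against it to decide whether anything needs flushing.
 */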
423 
424 void giveup_all(struct task_struct *tsk)
425 {
426 	unsigned long usermsr;
427 
428 	if (!tsk->thread.regs)
429 		return;
430 
431 	check_if_tm_restore_required(tsk);
432 
433 	usermsr = tsk->thread.regs->msr;
434 
435 	if ((usermsr & msr_all_available) == 0)
436 		return;
437 
438 	msr_check_and_set(msr_all_available);
439 
440 	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
441 
442 	if (usermsr & MSR_FP)
443 		__giveup_fpu(tsk);
444 	if (usermsr & MSR_VEC)
445 		__giveup_altivec(tsk);
446 	if (usermsr & MSR_SPE)
447 		__giveup_spe(tsk);
448 
449 	msr_check_and_clear(msr_all_available);
450 }
451 EXPORT_SYMBOL(giveup_all);
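
/*
 * Added note: giveup_all() is the context-switch flush path; __switch_to()
 * below calls it on the outgoing task so no FP/VMX/VSX/SPE state is left
 * live in the registers when the incoming task starts running.
 */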
452 
453 #ifdef CONFIG_PPC_BOOK3S_64
454 #ifdef CONFIG_PPC_FPU
455 static bool should_restore_fp(void)
456 {
457 	if (current->thread.load_fp) {
458 		current->thread.load_fp++;
459 		return true;
460 	}
461 	return false;
462 }
463 
464 static void do_restore_fp(void)
465 {
466 	load_fp_state(&current->thread.fp_state);
467 }
468 #else
469 static bool should_restore_fp(void) { return false; }
470 static void do_restore_fp(void) { }
471 #endif /* CONFIG_PPC_FPU */
472 
473 #ifdef CONFIG_ALTIVEC
474 static bool should_restore_altivec(void)
475 {
476 	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
477 		current->thread.load_vec++;
478 		return true;
479 	}
480 	return false;
481 }
482 
483 static void do_restore_altivec(void)
484 {
485 	load_vr_state(&current->thread.vr_state);
486 	current->thread.used_vr = 1;
487 }
488 #else
489 static bool should_restore_altivec(void) { return false; }
490 static void do_restore_altivec(void) { }
491 #endif /* CONFIG_ALTIVEC */
492 
493 static bool should_restore_vsx(void)
494 {
495 	if (cpu_has_feature(CPU_FTR_VSX))
496 		return true;
497 	return false;
498 }
499 #ifdef CONFIG_VSX
500 static void do_restore_vsx(void)
501 {
502 	current->thread.used_vsr = 1;
503 }
504 #else
505 static void do_restore_vsx(void) { }
506 #endif /* CONFIG_VSX */
507 
508 /*
509  * The exception exit path calls restore_math() with interrupts hard disabled
510  * but the soft irq state not "reconciled". ftrace code that calls
511  * local_irq_save/restore causes warnings.
512  *
513  * Rather than complicate the exit path, just don't trace restore_math. This
514  * could be done by having ftrace entry code check for this un-reconciled
515  * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
516  * temporarily fix it up for the duration of the ftrace call.
517  */
518 void notrace restore_math(struct pt_regs *regs)
519 {
520 	unsigned long msr;
521 	unsigned long new_msr = 0;
522 
523 	msr = regs->msr;
524 
525 	/*
526 	 * new_msr tracks the facilities that are to be restored. Only reload
527 	 * if the bit is not set in the user MSR (if it is set, the registers
528 	 * are live for the user thread).
529 	 */
530 	if ((!(msr & MSR_FP)) && should_restore_fp())
531 		new_msr |= MSR_FP;
532 
533 	if ((!(msr & MSR_VEC)) && should_restore_altivec())
534 		new_msr |= MSR_VEC;
535 
536 	if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
537 		if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
538 			new_msr |= MSR_VSX;
539 	}
540 
541 	if (new_msr) {
542 		unsigned long fpexc_mode = 0;
543 
544 		msr_check_and_set(new_msr);
545 
546 		if (new_msr & MSR_FP) {
547 			do_restore_fp();
548 
549 			// This also covers VSX, because VSX implies FP
550 			fpexc_mode = current->thread.fpexc_mode;
551 		}
552 
553 		if (new_msr & MSR_VEC)
554 			do_restore_altivec();
555 
556 		if (new_msr & MSR_VSX)
557 			do_restore_vsx();
558 
559 		msr_check_and_clear(new_msr);
560 
561 		regs->msr |= new_msr | fpexc_mode;
562 	}
563 }
564 #endif /* CONFIG_PPC_BOOK3S_64 */
565 
566 static void save_all(struct task_struct *tsk)
567 {
568 	unsigned long usermsr;
569 
570 	if (!tsk->thread.regs)
571 		return;
572 
573 	usermsr = tsk->thread.regs->msr;
574 
575 	if ((usermsr & msr_all_available) == 0)
576 		return;
577 
578 	msr_check_and_set(msr_all_available);
579 
580 	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
581 
582 	if (usermsr & MSR_FP)
583 		save_fpu(tsk);
584 
585 	if (usermsr & MSR_VEC)
586 		save_altivec(tsk);
587 
588 	if (usermsr & MSR_SPE)
589 		__giveup_spe(tsk);
590 
591 	msr_check_and_clear(msr_all_available);
592 }
593 
594 void flush_all_to_thread(struct task_struct *tsk)
595 {
596 	if (tsk->thread.regs) {
597 		preempt_disable();
598 		BUG_ON(tsk != current);
599 #ifdef CONFIG_SPE
600 		if (tsk->thread.regs->msr & MSR_SPE)
601 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
602 #endif
603 		save_all(tsk);
604 
605 		preempt_enable();
606 	}
607 }
608 EXPORT_SYMBOL(flush_all_to_thread);
609 
610 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
611 void do_send_trap(struct pt_regs *regs, unsigned long address,
612 		  unsigned long error_code, int breakpt)
613 {
614 	current->thread.trap_nr = TRAP_HWBKPT;
615 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
616 			11, SIGSEGV) == NOTIFY_STOP)
617 		return;
618 
619 	/* Deliver the signal to userspace */
620 	force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
621 				    (void __user *)address);
622 }
623 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
624 
625 static void do_break_handler(struct pt_regs *regs)
626 {
627 	struct arch_hw_breakpoint null_brk = {0};
628 	struct arch_hw_breakpoint *info;
629 	struct ppc_inst instr = ppc_inst(0);
630 	int type = 0;
631 	int size = 0;
632 	unsigned long ea;
633 	int i;
634 
635 	/*
636 	 * If the underlying hw supports only one watchpoint, we know it
637 	 * caused the exception. 8xx also falls into this category.
638 	 */
639 	if (nr_wp_slots() == 1) {
640 		__set_breakpoint(0, &null_brk);
641 		current->thread.hw_brk[0] = null_brk;
642 		current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
643 		return;
644 	}
645 
646 	/* Otherwise find out which DAWR caused the exception and disable it. */
647 	wp_get_instr_detail(regs, &instr, &type, &size, &ea);
648 
649 	for (i = 0; i < nr_wp_slots(); i++) {
650 		info = &current->thread.hw_brk[i];
651 		if (!info->address)
652 			continue;
653 
654 		if (wp_check_constraints(regs, instr, ea, type, size, info)) {
655 			__set_breakpoint(i, &null_brk);
656 			current->thread.hw_brk[i] = null_brk;
657 			current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
658 		}
659 	}
660 }
661 
662 void do_break(struct pt_regs *regs, unsigned long address,
663 		    unsigned long error_code)
664 {
665 	current->thread.trap_nr = TRAP_HWBKPT;
666 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
667 			11, SIGSEGV) == NOTIFY_STOP)
668 		return;
669 
670 	if (debugger_break_match(regs))
671 		return;
672 
673 	/*
674 	 * We reach here only when watchpoint exception is generated by ptrace
675 	 * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set,
676 	 * watchpoint is already handled by hw_breakpoint_handler() so we don't
677 	 * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set,
678 	 * we need to manually handle the watchpoint here.
679 	 */
680 	if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
681 		do_break_handler(regs);
682 
683 	/* Deliver the signal to userspace */
684 	force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
685 }
686 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
687 
688 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
689 
690 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
691 /*
692  * Set the debug registers back to their default "safe" values.
693  */
694 static void set_debug_reg_defaults(struct thread_struct *thread)
695 {
696 	thread->debug.iac1 = thread->debug.iac2 = 0;
697 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
698 	thread->debug.iac3 = thread->debug.iac4 = 0;
699 #endif
700 	thread->debug.dac1 = thread->debug.dac2 = 0;
701 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
702 	thread->debug.dvc1 = thread->debug.dvc2 = 0;
703 #endif
704 	thread->debug.dbcr0 = 0;
705 #ifdef CONFIG_BOOKE
706 	/*
707 	 * Force User/Supervisor bits to 0b11 (user-only, MSR[PR]=1)
708 	 */
709 	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
710 			DBCR1_IAC3US | DBCR1_IAC4US;
711 	/*
712 	 * Force Data Address Compare User/Supervisor bits to be User-only
713 	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
714 	 */
715 	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
716 #else
717 	thread->debug.dbcr1 = 0;
718 #endif
719 }
720 
721 static void prime_debug_regs(struct debug_reg *debug)
722 {
723 	/*
724 	 * We could have inherited MSR_DE from userspace, since
725 	 * it doesn't get cleared on exception entry.  Make sure
726 	 * MSR_DE is clear before we enable any debug events.
727 	 */
728 	mtmsr(mfmsr() & ~MSR_DE);
729 
730 	mtspr(SPRN_IAC1, debug->iac1);
731 	mtspr(SPRN_IAC2, debug->iac2);
732 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
733 	mtspr(SPRN_IAC3, debug->iac3);
734 	mtspr(SPRN_IAC4, debug->iac4);
735 #endif
736 	mtspr(SPRN_DAC1, debug->dac1);
737 	mtspr(SPRN_DAC2, debug->dac2);
738 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
739 	mtspr(SPRN_DVC1, debug->dvc1);
740 	mtspr(SPRN_DVC2, debug->dvc2);
741 #endif
742 	mtspr(SPRN_DBCR0, debug->dbcr0);
743 	mtspr(SPRN_DBCR1, debug->dbcr1);
744 #ifdef CONFIG_BOOKE
745 	mtspr(SPRN_DBCR2, debug->dbcr2);
746 #endif
747 }
748 /*
749  * If either the old or the new thread is making use of the
750  * debug registers, set the debug registers from the values
751  * stored in the new thread.
752  */
753 void switch_booke_debug_regs(struct debug_reg *new_debug)
754 {
755 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
756 		|| (new_debug->dbcr0 & DBCR0_IDM))
757 			prime_debug_regs(new_debug);
758 }
759 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
760 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
761 #ifndef CONFIG_HAVE_HW_BREAKPOINT
762 static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
763 {
764 	preempt_disable();
765 	__set_breakpoint(i, brk);
766 	preempt_enable();
767 }
768 
769 static void set_debug_reg_defaults(struct thread_struct *thread)
770 {
771 	int i;
772 	struct arch_hw_breakpoint null_brk = {0};
773 
774 	for (i = 0; i < nr_wp_slots(); i++) {
775 		thread->hw_brk[i] = null_brk;
776 		if (ppc_breakpoint_available())
777 			set_breakpoint(i, &thread->hw_brk[i]);
778 	}
779 }
780 
781 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
782 				struct arch_hw_breakpoint *b)
783 {
784 	if (a->address != b->address)
785 		return false;
786 	if (a->type != b->type)
787 		return false;
788 	if (a->len != b->len)
789 		return false;
790 	/* no need to check hw_len. it's calculated from address and len */
791 	return true;
792 }
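
/*
 * Added note: hw_brk_match() lets switch_hw_breakpoint() below skip the
 * debug-SPR writes for any slot where the incoming task's breakpoint is
 * identical to the one already programmed on this CPU.
 */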
793 
794 static void switch_hw_breakpoint(struct task_struct *new)
795 {
796 	int i;
797 
798 	for (i = 0; i < nr_wp_slots(); i++) {
799 		if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
800 					&new->thread.hw_brk[i])))
801 			continue;
802 
803 		__set_breakpoint(i, &new->thread.hw_brk[i]);
804 	}
805 }
806 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
807 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
808 
809 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
810 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
811 {
812 	mtspr(SPRN_DAC1, dabr);
813 	if (IS_ENABLED(CONFIG_PPC_47x))
814 		isync();
815 	return 0;
816 }
817 #elif defined(CONFIG_PPC_BOOK3S)
818 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
819 {
820 	mtspr(SPRN_DABR, dabr);
821 	if (cpu_has_feature(CPU_FTR_DABRX))
822 		mtspr(SPRN_DABRX, dabrx);
823 	return 0;
824 }
825 #else
826 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
827 {
828 	return -EINVAL;
829 }
830 #endif
831 
832 static inline int set_dabr(struct arch_hw_breakpoint *brk)
833 {
834 	unsigned long dabr, dabrx;
835 
836 	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
837 	dabrx = ((brk->type >> 3) & 0x7);
838 
839 	if (ppc_md.set_dabr)
840 		return ppc_md.set_dabr(dabr, dabrx);
841 
842 	return __set_dabr(dabr, dabrx);
843 }
844 
845 static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
846 {
847 	unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
848 			       LCTRL1_CRWF_RW;
849 	unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
850 	unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
851 	unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
852 
853 	if (start_addr == 0)
854 		lctrl2 |= LCTRL2_LW0LA_F;
855 	else if (end_addr == 0)
856 		lctrl2 |= LCTRL2_LW0LA_E;
857 	else
858 		lctrl2 |= LCTRL2_LW0LA_EandF;
859 
860 	mtspr(SPRN_LCTRL2, 0);
861 
862 	if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
863 		return 0;
864 
865 	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
866 		lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
867 	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
868 		lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
869 
870 	mtspr(SPRN_CMPE, start_addr - 1);
871 	mtspr(SPRN_CMPF, end_addr);
872 	mtspr(SPRN_LCTRL1, lctrl1);
873 	mtspr(SPRN_LCTRL2, lctrl2);
874 
875 	return 0;
876 }
877 
878 void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
879 {
880 	memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
881 
882 	if (dawr_enabled())
883 		// Power8 or later
884 		set_dawr(nr, brk);
885 	else if (IS_ENABLED(CONFIG_PPC_8xx))
886 		set_breakpoint_8xx(brk);
887 	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
888 		// Power7 or earlier
889 		set_dabr(brk);
890 	else
891 		// Shouldn't happen due to higher level checks
892 		WARN_ON_ONCE(1);
893 }
894 
895 /* Check if we have DAWR or DABR hardware */
896 bool ppc_breakpoint_available(void)
897 {
898 	if (dawr_enabled())
899 		return true; /* POWER8 DAWR or POWER9 forced DAWR */
900 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
901 		return false; /* POWER9 with DAWR disabled */
902 	/* DABR: Everything but POWER8 and POWER9 */
903 	return true;
904 }
905 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
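
/*
 * Illustrative use (an assumption, not from this file): callers gate
 * breakpoint setup on hardware support, as set_debug_reg_defaults()
 * does above:
 *
 *	if (!ppc_breakpoint_available())
 *		return -ENODEV;
 */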
906 
907 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
908 
909 static inline bool tm_enabled(struct task_struct *tsk)
910 {
911 	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
912 }
913 
914 static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
915 {
916 	/*
917 	 * Use the current MSR TM suspended bit to track if we have
918 	 * checkpointed state outstanding.
919 	 * On signal delivery, we'd normally reclaim the checkpointed
920 	 * state to obtain the stack pointer (see: get_tm_stackpointer()).
921 	 * This will then directly return to userspace without going
922 	 * through __switch_to(). However, if the stack frame is bad,
923 	 * we need to exit this thread which calls __switch_to() which
924 	 * will again attempt to reclaim the already saved tm state.
925 	 * Hence we need to check that we've not already reclaimed
926 	 * this state.
927 	 * We do this using the current MSR, rather than tracking it in
928 	 * some specific thread_struct bit, as it has the additional
929 	 * benefit of checking for a potential TM bad thing exception.
930 	 */
931 	if (!MSR_TM_SUSPENDED(mfmsr()))
932 		return;
933 
934 	giveup_all(container_of(thr, struct task_struct, thread));
935 
936 	tm_reclaim(thr, cause);
937 
938 	/*
939 	 * If we are in a transaction and FP is off then we can't have
940 	 * used FP inside that transaction. Hence the checkpointed
941 	 * state is the same as the live state. We need to copy the
942 	 * live state to the checkpointed state so that when the
943 	 * transaction is restored, the checkpointed state is correct
944 	 * and the aborted transaction sees the correct state. We use
945 	 * ckpt_regs.msr here as that's what tm_reclaim will use to
946 	 * determine if it's going to write the checkpointed state or
947 	 * not. So either this will write the checkpointed registers,
948 	 * or reclaim will. Similarly for VMX.
949 	 */
950 	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
951 		memcpy(&thr->ckfp_state, &thr->fp_state,
952 		       sizeof(struct thread_fp_state));
953 	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
954 		memcpy(&thr->ckvr_state, &thr->vr_state,
955 		       sizeof(struct thread_vr_state));
956 }
957 
958 void tm_reclaim_current(uint8_t cause)
959 {
960 	tm_enable();
961 	tm_reclaim_thread(&current->thread, cause);
962 }
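
/*
 * Added note: tm_reclaim_current() is the reclaim entry point used outside
 * the context-switch path; start_thread() below calls it with cause 0 when
 * exec()ing with a suspended transaction, and the signal code reaches it
 * via get_tm_stackpointer().
 */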
963 
964 static inline void tm_reclaim_task(struct task_struct *tsk)
965 {
966 	/* We have to work out if we're switching from/to a task that's in the
967 	 * middle of a transaction.
968 	 *
969 	 * In switching we need to maintain a 2nd register state as
970 	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
971 	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
972 	 * ckvr_state
973 	 *
974 	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
975 	 */
976 	struct thread_struct *thr = &tsk->thread;
977 
978 	if (!thr->regs)
979 		return;
980 
981 	if (!MSR_TM_ACTIVE(thr->regs->msr))
982 		goto out_and_saveregs;
983 
984 	WARN_ON(tm_suspend_disabled);
985 
986 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
987 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
988 		 tsk->pid, thr->regs->nip,
989 		 thr->regs->ccr, thr->regs->msr,
990 		 thr->regs->trap);
991 
992 	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
993 
994 	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
995 		 tsk->pid);
996 
997 out_and_saveregs:
998 	/* Always save the regs here, even if a transaction's not active.
999 	 * This context-switches a thread's TM info SPRs.  We do it here to
1000 	 * be consistent with the restore path (in recheckpoint) which
1001 	 * cannot happen later in _switch().
1002 	 */
1003 	tm_save_sprs(thr);
1004 }
1005 
1006 extern void __tm_recheckpoint(struct thread_struct *thread);
1007 
1008 void tm_recheckpoint(struct thread_struct *thread)
1009 {
1010 	unsigned long flags;
1011 
1012 	if (!(thread->regs->msr & MSR_TM))
1013 		return;
1014 
1015 	/* We really can't be interrupted here because the TM SPRs must not
1016 	 * change, and later in the trecheckpoint code we have a userspace R1.
1017 	 * So let's hard disable over this region.
1018 	 */
1019 	local_irq_save(flags);
1020 	hard_irq_disable();
1021 
1022 	/* The TM SPRs are restored here, so that TEXASR.FS can be set
1023 	 * before the trecheckpoint and no explosion occurs.
1024 	 */
1025 	tm_restore_sprs(thread);
1026 
1027 	__tm_recheckpoint(thread);
1028 
1029 	local_irq_restore(flags);
1030 }
1031 
1032 static inline void tm_recheckpoint_new_task(struct task_struct *new)
1033 {
1034 	if (!cpu_has_feature(CPU_FTR_TM))
1035 		return;
1036 
1037 	/* Recheckpoint the registers of the thread we're about to switch to.
1038 	 *
1039 	 * If the task was using FP, we non-lazily reload both the original and
1040 	 * the speculative FP register states.  This is because the kernel
1041 	 * doesn't see if/when a TM rollback occurs, so if we take an FP
1042 	 * unavailable later, we are unable to determine which set of FP regs
1043 	 * need to be restored.
1044 	 */
1045 	if (!tm_enabled(new))
1046 		return;
1047 
1048 	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
1049 		tm_restore_sprs(&new->thread);
1050 		return;
1051 	}
1052 	/* Recheckpoint to restore original checkpointed register state. */
1053 	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
1054 		 new->pid, new->thread.regs->msr);
1055 
1056 	tm_recheckpoint(&new->thread);
1057 
1058 	/*
1059 	 * The checkpointed state has been restored but the live state has
1060 	 * not, ensure all the math functionality is turned off to trigger
1061 	 * restore_math() to reload.
1062 	 */
1063 	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
1064 
1065 	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
1066 		 "(kernel msr 0x%lx)\n",
1067 		 new->pid, mfmsr());
1068 }
1069 
1070 static inline void __switch_to_tm(struct task_struct *prev,
1071 		struct task_struct *new)
1072 {
1073 	if (cpu_has_feature(CPU_FTR_TM)) {
1074 		if (tm_enabled(prev) || tm_enabled(new))
1075 			tm_enable();
1076 
1077 		if (tm_enabled(prev)) {
1078 			prev->thread.load_tm++;
1079 			tm_reclaim_task(prev);
1080 			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
1081 				prev->thread.regs->msr &= ~MSR_TM;
1082 		}
1083 
1084 		tm_recheckpoint_new_task(new);
1085 	}
1086 }
1087 
1088 /*
1089  * This is called if we are on the way out to userspace and the
1090  * TIF_RESTORE_TM flag is set.  It checks if we need to reload
1091  * FP and/or vector state and does so if necessary.
1092  * If userspace is inside a transaction (whether active or
1093  * suspended) and FP/VMX/VSX instructions have ever been enabled
1094  * inside that transaction, then we have to keep them enabled
1095  * and keep the FP/VMX/VSX state loaded for as long as the transaction
1096  * continues.  The reason is that if we didn't, and subsequently
1097  * got an FP/VMX/VSX unavailable interrupt inside a transaction,
1098  * we don't know whether it's the same transaction, and thus we
1099  * don't know which of the checkpointed state and the transactional
1100  * state to use.
1101  */
1102 void restore_tm_state(struct pt_regs *regs)
1103 {
1104 	unsigned long msr_diff;
1105 
1106 	/*
1107 	 * This is the only moment we should clear TIF_RESTORE_TM as
1108 	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
1109 	 * again; anything else could lead to an incorrect ckpt_msr being
1110 	 * saved and therefore incorrect signal contexts.
1111 	 */
1112 	clear_thread_flag(TIF_RESTORE_TM);
1113 	if (!MSR_TM_ACTIVE(regs->msr))
1114 		return;
1115 
1116 	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1117 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1118 
1119 	/* Ensure that restore_math() will restore */
1120 	if (msr_diff & MSR_FP)
1121 		current->thread.load_fp = 1;
1122 #ifdef CONFIG_ALTIVEC
1123 	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1124 		current->thread.load_vec = 1;
1125 #endif
1126 	restore_math(regs);
1127 
1128 	regs->msr |= msr_diff;
1129 }
1130 
1131 #else
1132 #define tm_recheckpoint_new_task(new)
1133 #define __switch_to_tm(prev, new)
1134 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1135 
1136 static inline void save_sprs(struct thread_struct *t)
1137 {
1138 #ifdef CONFIG_ALTIVEC
1139 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
1140 		t->vrsave = mfspr(SPRN_VRSAVE);
1141 #endif
1142 #ifdef CONFIG_PPC_BOOK3S_64
1143 	if (cpu_has_feature(CPU_FTR_DSCR))
1144 		t->dscr = mfspr(SPRN_DSCR);
1145 
1146 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1147 		t->bescr = mfspr(SPRN_BESCR);
1148 		t->ebbhr = mfspr(SPRN_EBBHR);
1149 		t->ebbrr = mfspr(SPRN_EBBRR);
1150 
1151 		t->fscr = mfspr(SPRN_FSCR);
1152 
1153 		/*
1154 		 * Note that the TAR is not available for use in the kernel.
1155 		 * (To provide this, the TAR should be backed up/restored on
1156 		 * exception entry/exit instead, and be in pt_regs.  FIXME,
1157 		 * this should be in pt_regs anyway (for debug).)
1158 		 */
1159 		t->tar = mfspr(SPRN_TAR);
1160 	}
1161 #endif
1162 }
1163 
1164 static inline void restore_sprs(struct thread_struct *old_thread,
1165 				struct thread_struct *new_thread)
1166 {
1167 #ifdef CONFIG_ALTIVEC
1168 	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1169 	    old_thread->vrsave != new_thread->vrsave)
1170 		mtspr(SPRN_VRSAVE, new_thread->vrsave);
1171 #endif
1172 #ifdef CONFIG_PPC_BOOK3S_64
1173 	if (cpu_has_feature(CPU_FTR_DSCR)) {
1174 		u64 dscr = get_paca()->dscr_default;
1175 		if (new_thread->dscr_inherit)
1176 			dscr = new_thread->dscr;
1177 
1178 		if (old_thread->dscr != dscr)
1179 			mtspr(SPRN_DSCR, dscr);
1180 	}
1181 
1182 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1183 		if (old_thread->bescr != new_thread->bescr)
1184 			mtspr(SPRN_BESCR, new_thread->bescr);
1185 		if (old_thread->ebbhr != new_thread->ebbhr)
1186 			mtspr(SPRN_EBBHR, new_thread->ebbhr);
1187 		if (old_thread->ebbrr != new_thread->ebbrr)
1188 			mtspr(SPRN_EBBRR, new_thread->ebbrr);
1189 
1190 		if (old_thread->fscr != new_thread->fscr)
1191 			mtspr(SPRN_FSCR, new_thread->fscr);
1192 
1193 		if (old_thread->tar != new_thread->tar)
1194 			mtspr(SPRN_TAR, new_thread->tar);
1195 	}
1196 
1197 	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1198 	    old_thread->tidr != new_thread->tidr)
1199 		mtspr(SPRN_TIDR, new_thread->tidr);
1200 #endif
1201 
1202 }
1203 
1204 struct task_struct *__switch_to(struct task_struct *prev,
1205 	struct task_struct *new)
1206 {
1207 	struct thread_struct *new_thread, *old_thread;
1208 	struct task_struct *last;
1209 #ifdef CONFIG_PPC_BOOK3S_64
1210 	struct ppc64_tlb_batch *batch;
1211 #endif
1212 
1213 	new_thread = &new->thread;
1214 	old_thread = &current->thread;
1215 
1216 	WARN_ON(!irqs_disabled());
1217 
1218 #ifdef CONFIG_PPC_BOOK3S_64
1219 	batch = this_cpu_ptr(&ppc64_tlb_batch);
1220 	if (batch->active) {
1221 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1222 		if (batch->index)
1223 			__flush_tlb_pending(batch);
1224 		batch->active = 0;
1225 	}
1226 #endif /* CONFIG_PPC_BOOK3S_64 */
1227 
1228 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1229 	switch_booke_debug_regs(&new->thread.debug);
1230 #else
1231 /*
1232  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1233  * schedule the DABR.
1234  */
1235 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1236 	switch_hw_breakpoint(new);
1237 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1238 #endif
1239 
1240 	/*
1241 	 * We need to save SPRs before treclaim/trecheckpoint as these will
1242 	 * change a number of them.
1243 	 */
1244 	save_sprs(&prev->thread);
1245 
1246 	/* Save FPU, Altivec, VSX and SPE state */
1247 	giveup_all(prev);
1248 
1249 	__switch_to_tm(prev, new);
1250 
1251 	if (!radix_enabled()) {
1252 		/*
1253 		 * We can't take a PMU exception inside _switch() since there
1254 		 * is a window where the kernel stack SLB and the kernel stack
1255 		 * are out of sync. Hard disable here.
1256 		 */
1257 		hard_irq_disable();
1258 	}
1259 
1260 	/*
1261 	 * Call restore_sprs() before calling _switch(). If we move it after
1262 	 * _switch() then we miss out on calling it for new tasks. The reason
1263 	 * for this is we manually create a stack frame for new tasks that
1264 	 * directly returns through ret_from_fork() or
1265 	 * ret_from_kernel_thread(). See copy_thread() for details.
1266 	 */
1267 	restore_sprs(old_thread, new_thread);
1268 
1269 	last = _switch(old_thread, new_thread);
1270 
1271 #ifdef CONFIG_PPC_BOOK3S_64
1272 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1273 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1274 		batch = this_cpu_ptr(&ppc64_tlb_batch);
1275 		batch->active = 1;
1276 	}
1277 
1278 	if (current->thread.regs) {
1279 		restore_math(current->thread.regs);
1280 
1281 		/*
1282 		 * On POWER9 the copy-paste buffer can only paste into
1283 		 * foreign real addresses, so unprivileged processes cannot
1284 		 * see the data or use it in any way unless they have
1285 		 * foreign real mappings. If the new process has the foreign
1286 		 * real address mappings, we must issue a cp_abort to clear
1287 		 * any state and prevent snooping, corruption or a covert
1288 		 * channel. ISA v3.1 supports paste into local memory.
1289 		 */
1290 		if (current->mm &&
1291 			(cpu_has_feature(CPU_FTR_ARCH_31) ||
1292 			atomic_read(&current->mm->context.vas_windows)))
1293 			asm volatile(PPC_CP_ABORT);
1294 	}
1295 #endif /* CONFIG_PPC_BOOK3S_64 */
1296 
1297 	return last;
1298 }
1299 
1300 #define NR_INSN_TO_PRINT	16
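
/*
 * Added note: both dump routines below start 12 instructions (3/4 of
 * NR_INSN_TO_PRINT) before regs->nip, so the faulting instruction lands
 * inside the window and is printed inside <angle brackets>.
 */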
1301 
1302 static void show_instructions(struct pt_regs *regs)
1303 {
1304 	int i;
1305 	unsigned long nip = regs->nip;
1306 	unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1307 
1308 	printk("Instruction dump:");
1309 
1310 	/*
1311 	 * If we were executing with the MMU off for instructions, adjust pc
1312 	 * rather than printing XXXXXXXX.
1313 	 */
1314 	if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
1315 		pc = (unsigned long)phys_to_virt(pc);
1316 		nip = (unsigned long)phys_to_virt(regs->nip);
1317 	}
1318 
1319 	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1320 		int instr;
1321 
1322 		if (!(i % 8))
1323 			pr_cont("\n");
1324 
1325 		if (!__kernel_text_address(pc) ||
1326 		    get_kernel_nofault(instr, (const void *)pc)) {
1327 			pr_cont("XXXXXXXX ");
1328 		} else {
1329 			if (nip == pc)
1330 				pr_cont("<%08x> ", instr);
1331 			else
1332 				pr_cont("%08x ", instr);
1333 		}
1334 
1335 		pc += sizeof(int);
1336 	}
1337 
1338 	pr_cont("\n");
1339 }
1340 
1341 void show_user_instructions(struct pt_regs *regs)
1342 {
1343 	unsigned long pc;
1344 	int n = NR_INSN_TO_PRINT;
1345 	struct seq_buf s;
1346 	char buf[96]; /* enough for 8 times 9 + 2 chars */
1347 
1348 	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1349 
1350 	seq_buf_init(&s, buf, sizeof(buf));
1351 
1352 	while (n) {
1353 		int i;
1354 
1355 		seq_buf_clear(&s);
1356 
1357 		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1358 			int instr;
1359 
1360 			if (copy_from_user_nofault(&instr, (void __user *)pc,
1361 					sizeof(instr))) {
1362 				seq_buf_printf(&s, "XXXXXXXX ");
1363 				continue;
1364 			}
1365 			seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1366 		}
1367 
1368 		if (!seq_buf_has_overflowed(&s))
1369 			pr_info("%s[%d]: code: %s\n", current->comm,
1370 				current->pid, s.buffer);
1371 	}
1372 }
1373 
1374 struct regbit {
1375 	unsigned long bit;
1376 	const char *name;
1377 };
1378 
1379 static struct regbit msr_bits[] = {
1380 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1381 	{MSR_SF,	"SF"},
1382 	{MSR_HV,	"HV"},
1383 #endif
1384 	{MSR_VEC,	"VEC"},
1385 	{MSR_VSX,	"VSX"},
1386 #ifdef CONFIG_BOOKE
1387 	{MSR_CE,	"CE"},
1388 #endif
1389 	{MSR_EE,	"EE"},
1390 	{MSR_PR,	"PR"},
1391 	{MSR_FP,	"FP"},
1392 	{MSR_ME,	"ME"},
1393 #ifdef CONFIG_BOOKE
1394 	{MSR_DE,	"DE"},
1395 #else
1396 	{MSR_SE,	"SE"},
1397 	{MSR_BE,	"BE"},
1398 #endif
1399 	{MSR_IR,	"IR"},
1400 	{MSR_DR,	"DR"},
1401 	{MSR_PMM,	"PMM"},
1402 #ifndef CONFIG_BOOKE
1403 	{MSR_RI,	"RI"},
1404 	{MSR_LE,	"LE"},
1405 #endif
1406 	{0,		NULL}
1407 };
1408 
1409 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1410 {
1411 	const char *s = "";
1412 
1413 	for (; bits->bit; ++bits)
1414 		if (val & bits->bit) {
1415 			pr_cont("%s%s", s, bits->name);
1416 			s = sep;
1417 		}
1418 }
1419 
1420 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1421 static struct regbit msr_tm_bits[] = {
1422 	{MSR_TS_T,	"T"},
1423 	{MSR_TS_S,	"S"},
1424 	{MSR_TM,	"E"},
1425 	{0,		NULL}
1426 };
1427 
1428 static void print_tm_bits(unsigned long val)
1429 {
1430 /*
1431  * This only prints something if at least one of the TM bits is set.
1432  * Inside the TM[], the output means:
1433  *   E: Enabled		(bit 32)
1434  *   S: Suspended	(bit 33)
1435  *   T: Transactional	(bit 34)
1436  */
1437 	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1438 		pr_cont(",TM[");
1439 		print_bits(val, msr_tm_bits, "");
1440 		pr_cont("]");
1441 	}
1442 }
1443 #else
1444 static void print_tm_bits(unsigned long val) {}
1445 #endif
1446 
1447 static void print_msr_bits(unsigned long val)
1448 {
1449 	pr_cont("<");
1450 	print_bits(val, msr_bits, ",");
1451 	print_tm_bits(val);
1452 	pr_cont(">");
1453 }
1454 
1455 #ifdef CONFIG_PPC64
1456 #define REG		"%016lx"
1457 #define REGS_PER_LINE	4
1458 #define LAST_VOLATILE	13
1459 #else
1460 #define REG		"%08lx"
1461 #define REGS_PER_LINE	8
1462 #define LAST_VOLATILE	12
1463 #endif
1464 
1465 static void __show_regs(struct pt_regs *regs)
1466 {
1467 	int i, trap;
1468 
1469 	printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
1470 	       regs->nip, regs->link, regs->ctr);
1471 	printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
1472 	       regs, regs->trap, print_tainted(), init_utsname()->release);
1473 	printk("MSR:  "REG" ", regs->msr);
1474 	print_msr_bits(regs->msr);
1475 	pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
1476 	trap = TRAP(regs);
1477 	if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
1478 		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1479 	if (trap == 0x200 || trap == 0x300 || trap == 0x600) {
1480 		if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
1481 			pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1482 		else
1483 			pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1484 	}
1485 
1486 #ifdef CONFIG_PPC64
1487 	pr_cont("IRQMASK: %lx ", regs->softe);
1488 #endif
1489 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1490 	if (MSR_TM_ACTIVE(regs->msr))
1491 		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1492 #endif
1493 
1494 	for (i = 0;  i < 32;  i++) {
1495 		if ((i % REGS_PER_LINE) == 0)
1496 			pr_cont("\nGPR%02d: ", i);
1497 		pr_cont(REG " ", regs->gpr[i]);
1498 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
1499 			break;
1500 	}
1501 	pr_cont("\n");
1502 	/*
1503 	 * Look up the NIP late so we have the best chance of getting the
1504 	 * above info out without failing.
1505 	 */
1506 	if (IS_ENABLED(CONFIG_KALLSYMS)) {
1507 		printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1508 		printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1509 	}
1510 }
1511 
1512 void show_regs(struct pt_regs *regs)
1513 {
1514 	show_regs_print_info(KERN_DEFAULT);
1515 	__show_regs(regs);
1516 	show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
1517 	if (!user_mode(regs))
1518 		show_instructions(regs);
1519 }
1520 
1521 void flush_thread(void)
1522 {
1523 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1524 	flush_ptrace_hw_breakpoint(current);
1525 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1526 	set_debug_reg_defaults(&current->thread);
1527 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1528 }
1529 
1530 void arch_setup_new_exec(void)
1531 {
1532 
1533 #ifdef CONFIG_PPC_BOOK3S_64
1534 	if (!radix_enabled())
1535 		hash__setup_new_exec();
1536 #endif
1537 	/*
1538 	 * If we exec out of a kernel thread then thread.regs will not be
1539 	 * set.  Do it now.
1540 	 */
1541 	if (!current->thread.regs) {
1542 		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1543 		current->thread.regs = regs - 1;
1544 	}
1545 
1546 #ifdef CONFIG_PPC_MEM_KEYS
1547 	current->thread.regs->amr  = default_amr;
1548 	current->thread.regs->iamr  = default_iamr;
1549 #endif
1550 }
1551 
1552 #ifdef CONFIG_PPC64
1553 /**
1554  * Assign a TIDR (thread ID) for task @t and set it in the thread
1555  * structure. For now, we only support setting TIDR for 'current' task.
1556  *
1557  * Since the TID value is a truncated form of its PID, it is possible
1558  * (but unlikely) for 2 threads to have the same TID. In the event
1559  * that 2 threads share the same TID and are waiting, one of the following
1560  * cases will happen:
1561  *
1562  * 1. The correct thread is running, the wrong thread is not
1563  * In this situation, the correct thread is woken and proceeds to pass its
1564  * condition check.
1565  *
1566  * 2. Neither thread is running
1567  * In this situation, neither thread will be woken. When scheduled, the waiting
1568  * threads will execute either a wait, which will return immediately, followed
1569  * by a condition check, which will pass for the correct thread and fail
1570  * for the wrong thread, or they will execute the condition check immediately.
1571  *
1572  * 3. The wrong thread is running, the correct thread is not
1573  * The wrong thread will be woken, but will fail its condition check and
1574  * re-execute wait. The correct thread, when scheduled, will execute either
1575  * its condition check (which will pass), or wait, which returns immediately
1576  * when called the first time after the thread is scheduled, followed by its
1577  * condition check (which will pass).
1578  *
1579  * 4. Both threads are running
1580  * Both threads will be woken. The wrong thread will fail its condition check
1581  * and execute another wait, while the correct thread will pass its condition
1582  * check.
1583  *
1584  * @t: the task to set the thread ID for
1585  */
1586 int set_thread_tidr(struct task_struct *t)
1587 {
1588 	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1589 		return -EINVAL;
1590 
1591 	if (t != current)
1592 		return -EINVAL;
1593 
1594 	if (t->thread.tidr)
1595 		return 0;
1596 
1597 	t->thread.tidr = (u16)task_pid_nr(t);
1598 	mtspr(SPRN_TIDR, t->thread.tidr);
1599 
1600 	return 0;
1601 }
1602 EXPORT_SYMBOL_GPL(set_thread_tidr);
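
/*
 * Added note (hedged): the TIDR programmed here is what thread-directed
 * wakeups match against; the expected callers are drivers that pair an
 * accelerator context with the current thread.
 */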
1603 
1604 #endif /* CONFIG_PPC64 */
1605 
1606 void
1607 release_thread(struct task_struct *t)
1608 {
1609 }
1610 
1611 /*
1612  * This gets called so that we can store coprocessor state into memory and
1613  * copy the current task into the new thread.
1614  */
1615 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1616 {
1617 	flush_all_to_thread(src);
1618 	/*
1619 	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
1620 	 * flush but it removes the checkpointed state from the current CPU and
1621 	 * transitions the CPU out of TM mode.  Hence we need to call
1622 	 * tm_recheckpoint_new_task() (on the same task) to restore the
1623 	 * checkpointed state back and the TM mode.
1624 	 *
1625 	 * Can't pass dst because it isn't ready. Doesn't matter, passing
1626 	 * dst is only important for __switch_to()
1627 	 */
1628 	__switch_to_tm(src, src);
1629 
1630 	*dst = *src;
1631 
1632 	clear_task_ebb(dst);
1633 
1634 	return 0;
1635 }
1636 
1637 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1638 {
1639 #ifdef CONFIG_PPC_BOOK3S_64
1640 	unsigned long sp_vsid;
1641 	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1642 
1643 	if (radix_enabled())
1644 		return;
1645 
1646 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1647 		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1648 			<< SLB_VSID_SHIFT_1T;
1649 	else
1650 		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1651 			<< SLB_VSID_SHIFT;
1652 	sp_vsid |= SLB_VSID_KERNEL | llp;
1653 	p->thread.ksp_vsid = sp_vsid;
1654 #endif
1655 }
1656 
1657 /*
1658  * Copy a thread.
1659  */
1660 
1661 /*
1662  * Copy architecture-specific thread state
1663  */
1664 int copy_thread(unsigned long clone_flags, unsigned long usp,
1665 		unsigned long kthread_arg, struct task_struct *p,
1666 		unsigned long tls)
1667 {
1668 	struct pt_regs *childregs, *kregs;
1669 	extern void ret_from_fork(void);
1670 	extern void ret_from_fork_scv(void);
1671 	extern void ret_from_kernel_thread(void);
1672 	void (*f)(void);
1673 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1674 	struct thread_info *ti = task_thread_info(p);
1675 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1676 	int i;
1677 #endif
1678 
1679 	klp_init_thread_info(p);
1680 
1681 	/* Copy registers */
1682 	sp -= sizeof(struct pt_regs);
1683 	childregs = (struct pt_regs *) sp;
1684 	if (unlikely(p->flags & PF_KTHREAD)) {
1685 		/* kernel thread */
1686 		memset(childregs, 0, sizeof(struct pt_regs));
1687 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
1688 		/* function */
1689 		if (usp)
1690 			childregs->gpr[14] = ppc_function_entry((void *)usp);
1691 #ifdef CONFIG_PPC64
1692 		clear_tsk_thread_flag(p, TIF_32BIT);
1693 		childregs->softe = IRQS_ENABLED;
1694 #endif
1695 		childregs->gpr[15] = kthread_arg;
1696 		p->thread.regs = NULL;	/* no user register state */
1697 		ti->flags |= _TIF_RESTOREALL;
1698 		f = ret_from_kernel_thread;
1699 	} else {
1700 		/* user thread */
1701 		struct pt_regs *regs = current_pt_regs();
1702 		CHECK_FULL_REGS(regs);
1703 		*childregs = *regs;
1704 		if (usp)
1705 			childregs->gpr[1] = usp;
1706 		p->thread.regs = childregs;
1707 		/* 64s sets this in ret_from_fork */
1708 		if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
1709 			childregs->gpr[3] = 0;  /* Result from fork() */
1710 		if (clone_flags & CLONE_SETTLS) {
1711 			if (!is_32bit_task())
1712 				childregs->gpr[13] = tls;
1713 			else
1714 				childregs->gpr[2] = tls;
1715 		}
1716 
1717 		if (trap_is_scv(regs))
1718 			f = ret_from_fork_scv;
1719 		else
1720 			f = ret_from_fork;
1721 	}
1722 	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1723 	sp -= STACK_FRAME_OVERHEAD;
1724 
1725 	/*
1726 	 * The way this works is that at some point in the future
1727 	 * some task will call _switch to switch to the new task.
1728 	 * That will pop off the stack frame created below and start
1729 	 * the new task running at ret_from_fork.  The new task will
1730 	 * do some housekeeping and then return from the fork or clone
1731 	 * system call, using the stack frame created above.
1732 	 */
1733 	((unsigned long *)sp)[0] = 0;
1734 	sp -= sizeof(struct pt_regs);
1735 	kregs = (struct pt_regs *) sp;
1736 	sp -= STACK_FRAME_OVERHEAD;
1737 	p->thread.ksp = sp;
1738 #ifdef CONFIG_PPC32
1739 	p->thread.ksp_limit = (unsigned long)end_of_stack(p);
1740 #endif
1741 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1742 	for (i = 0; i < nr_wp_slots(); i++)
1743 		p->thread.ptrace_bps[i] = NULL;
1744 #endif
1745 
1746 #ifdef CONFIG_PPC_FPU_REGS
1747 	p->thread.fp_save_area = NULL;
1748 #endif
1749 #ifdef CONFIG_ALTIVEC
1750 	p->thread.vr_save_area = NULL;
1751 #endif
1752 
1753 	setup_ksp_vsid(p, sp);
1754 
1755 #ifdef CONFIG_PPC64
1756 	if (cpu_has_feature(CPU_FTR_DSCR)) {
1757 		p->thread.dscr_inherit = current->thread.dscr_inherit;
1758 		p->thread.dscr = mfspr(SPRN_DSCR);
1759 	}
1760 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
1761 		childregs->ppr = DEFAULT_PPR;
1762 
1763 	p->thread.tidr = 0;
1764 #endif
1765 	/*
1766 	 * Run with the current AMR value of the kernel
1767 	 */
1768 #ifdef CONFIG_PPC_PKEY
1769 	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
1770 		kregs->amr = AMR_KUAP_BLOCKED;
1771 
1772 	if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
1773 		kregs->iamr = AMR_KUEP_BLOCKED;
1774 #endif
1775 	kregs->nip = ppc_function_entry(f);
1776 	return 0;
1777 }
1778 
1779 void preload_new_slb_context(unsigned long start, unsigned long sp);
1780 
1781 /*
1782  * Set up a thread for executing a new program
1783  */
1784 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1785 {
1786 #ifdef CONFIG_PPC64
1787 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
1788 
1789 	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
1790 		preload_new_slb_context(start, sp);
1791 #endif
1792 
1793 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1794 	/*
1795 	 * Clear any transactional state, we're exec()ing. The cause is
1796 	 * not important as there will never be a recheckpoint so it's not
1797 	 * user visible.
1798 	 */
1799 	if (MSR_TM_SUSPENDED(mfmsr()))
1800 		tm_reclaim_current(0);
1801 #endif
1802 
1803 	memset(regs->gpr, 0, sizeof(regs->gpr));
1804 	regs->ctr = 0;
1805 	regs->link = 0;
1806 	regs->xer = 0;
1807 	regs->ccr = 0;
1808 	regs->gpr[1] = sp;
1809 
1810 	/*
1811 	 * We have just cleared all the nonvolatile GPRs, so make
1812 	 * FULL_REGS(regs) return true.  This is necessary to allow
1813 	 * ptrace to examine the thread immediately after exec.
1814 	 */
1815 	SET_FULL_REGS(regs);
1816 
1817 #ifdef CONFIG_PPC32
1818 	regs->mq = 0;
1819 	regs->nip = start;
1820 	regs->msr = MSR_USER;
1821 #else
1822 	if (!is_32bit_task()) {
1823 		unsigned long entry;
1824 
1825 		if (is_elf2_task()) {
1826 			/* Look ma, no function descriptors! */
1827 			entry = start;
1828 
1829 			/*
1830 			 * Ulrich says:
1831 			 *   The latest iteration of the ABI requires that when
1832 			 *   calling a function (at its global entry point),
1833 			 *   the caller must ensure r12 holds the entry point
1834 			 *   address (so that the function can quickly
1835 			 *   establish addressability).
1836 			 */
1837 			regs->gpr[12] = start;
1838 			/* Make sure that's restored on entry to userspace. */
1839 			set_thread_flag(TIF_RESTOREALL);
1840 		} else {
1841 			unsigned long toc;
1842 
1843 			/* start is a relocated pointer to the function
1844 			 * descriptor for the elf _start routine.  The first
1845 			 * entry in the function descriptor is the entry
1846 			 * address of _start and the second entry is the TOC
1847 			 * value we need to use.
1848 			 */
1849 			__get_user(entry, (unsigned long __user *)start);
1850 			__get_user(toc, (unsigned long __user *)start+1);
1851 
1852 			/* Check whether the e_entry function descriptor entries
1853 			 * need to be relocated before we can use them.
1854 			 */
1855 			if (load_addr != 0) {
1856 				entry += load_addr;
1857 				toc   += load_addr;
1858 			}
1859 			regs->gpr[2] = toc;
1860 		}
1861 		regs->nip = entry;
1862 		regs->msr = MSR_USER64;
1863 	} else {
1864 		regs->nip = start;
1865 		regs->gpr[2] = 0;
1866 		regs->msr = MSR_USER32;
1867 	}
1868 #endif
1869 #ifdef CONFIG_VSX
1870 	current->thread.used_vsr = 0;
1871 #endif
1872 	current->thread.load_slb = 0;
1873 	current->thread.load_fp = 0;
1874 #ifdef CONFIG_PPC_FPU_REGS
1875 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1876 	current->thread.fp_save_area = NULL;
1877 #endif
1878 #ifdef CONFIG_ALTIVEC
1879 	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1880 	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1881 	current->thread.vr_save_area = NULL;
1882 	current->thread.vrsave = 0;
1883 	current->thread.used_vr = 0;
1884 	current->thread.load_vec = 0;
1885 #endif /* CONFIG_ALTIVEC */
1886 #ifdef CONFIG_SPE
1887 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
1888 	current->thread.acc = 0;
1889 	current->thread.spefscr = 0;
1890 	current->thread.used_spe = 0;
1891 #endif /* CONFIG_SPE */
1892 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1893 	current->thread.tm_tfhar = 0;
1894 	current->thread.tm_texasr = 0;
1895 	current->thread.tm_tfiar = 0;
1896 	current->thread.load_tm = 0;
1897 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1899 }
1900 EXPORT_SYMBOL(start_thread);
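
/*
 * For reference, a sketch of the ELFv1 function-descriptor layout that
 * the two __get_user() reads in start_thread() assume (the struct name
 * here is illustrative only; the kernel's own type is func_descr_t):
 *
 *	struct elfv1_func_desc {
 *		unsigned long entry;	// code address of _start
 *		unsigned long toc;	// TOC value to load into r2
 *		unsigned long env;	// environment pointer, unused here
 *	};
 */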
1901 
1902 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1903 		| PR_FP_EXC_RES | PR_FP_EXC_INV)
1904 
1905 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1906 {
1907 	struct pt_regs *regs = tsk->thread.regs;
1908 
1909 	/* This is a bit hairy.  If we are an SPE-enabled processor
1910 	 * (have embedded fp), we store the IEEE exception enable flags in
1911 	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
1912 	 * mode (async, precise, disabled) for 'Classic' FP. */
1913 	if (val & PR_FP_EXC_SW_ENABLE) {
1914 		if (cpu_has_feature(CPU_FTR_SPE)) {
1915 			/*
1916 			 * When the sticky exception bits are set
1917 			 * directly by userspace, it must call prctl
1918 			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1919 			 * in the existing prctl settings) or
1920 			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1921 			 * the bits being set).  <fenv.h> functions
1922 			 * saving and restoring the whole
1923 			 * floating-point environment need to do so
1924 			 * anyway to restore the prctl settings from
1925 			 * the saved environment.
1926 			 */
1927 #ifdef CONFIG_SPE
1928 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1929 			tsk->thread.fpexc_mode = val &
1930 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1931 #endif
1932 			return 0;
1933 		} else {
1934 			return -EINVAL;
1935 		}
1936 	}
1937 
1938 	/* On a CONFIG_SPE build this does not hurt us.  The bits that
1939 	 * __pack_fe01 uses do not overlap with the bits used for
1940 	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
1941 	 * on CONFIG_SPE implementations are reserved, so writing to
1942 	 * them does not change anything. */
1943 	if (val > PR_FP_EXC_PRECISE)
1944 		return -EINVAL;
1945 	tsk->thread.fpexc_mode = __pack_fe01(val);
1946 	if (regs != NULL && (regs->msr & MSR_FP) != 0)
1947 		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1948 			| tsk->thread.fpexc_mode;
1949 	return 0;
1950 }
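
/*
 * Illustrative userspace usage of the above (a sketch, not kernel
 * code): on classic FP only the mode itself is accepted, so a caller
 * requesting precise exception reporting would do:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * The individual PR_FP_EXC_* enable bits are only stored when combined
 * with PR_FP_EXC_SW_ENABLE on an SPE processor, as the code above shows.
 */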
1951 
1952 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1953 {
1954 	unsigned int val = 0;
1955 
1956 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
1957 		if (cpu_has_feature(CPU_FTR_SPE)) {
1958 			/*
1959 			 * When the sticky exception bits are set
1960 			 * directly by userspace, it must call prctl
1961 			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1962 			 * in the existing prctl settings) or
1963 			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1964 			 * the bits being set).  <fenv.h> functions
1965 			 * saving and restoring the whole
1966 			 * floating-point environment need to do so
1967 			 * anyway to restore the prctl settings from
1968 			 * the saved environment.
1969 			 */
1970 #ifdef CONFIG_SPE
1971 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1972 			val = tsk->thread.fpexc_mode;
1973 #endif
1974 		} else
1975 			return -EINVAL;
1976 	} else {
1977 		val = __unpack_fe01(tsk->thread.fpexc_mode);
1978 	}
1979 	return put_user(val, (unsigned int __user *) adr);
1980 }
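
/*
 * Matching read-back (again a sketch): PR_GET_FPEXC stores the current
 * mode through the pointer passed as the second prctl() argument.
 *
 *	unsigned int mode;
 *	prctl(PR_GET_FPEXC, (unsigned long)&mode);
 */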
1981 
1982 int set_endian(struct task_struct *tsk, unsigned int val)
1983 {
1984 	struct pt_regs *regs = tsk->thread.regs;
1985 
1986 	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1987 	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1988 		return -EINVAL;
1989 
1990 	if (regs == NULL)
1991 		return -EINVAL;
1992 
1993 	if (val == PR_ENDIAN_BIG)
1994 		regs->msr &= ~MSR_LE;
1995 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1996 		regs->msr |= MSR_LE;
1997 	else
1998 		return -EINVAL;
1999 
2000 	return 0;
2001 }
2002 
2003 int get_endian(struct task_struct *tsk, unsigned long adr)
2004 {
2005 	struct pt_regs *regs = tsk->thread.regs;
2006 	unsigned int val;
2007 
2008 	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
2009 	    !cpu_has_feature(CPU_FTR_REAL_LE))
2010 		return -EINVAL;
2011 
2012 	if (regs == NULL)
2013 		return -EINVAL;
2014 
2015 	if (regs->msr & MSR_LE) {
2016 		if (cpu_has_feature(CPU_FTR_REAL_LE))
2017 			val = PR_ENDIAN_LITTLE;
2018 		else
2019 			val = PR_ENDIAN_PPC_LITTLE;
2020 	} else
2021 		val = PR_ENDIAN_BIG;
2022 
2023 	return put_user(val, (unsigned int __user *)adr);
2024 }
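
/*
 * Illustrative prctl(2) usage for the pair above (a sketch): the value
 * is one of PR_ENDIAN_BIG, PR_ENDIAN_LITTLE or PR_ENDIAN_PPC_LITTLE,
 * and the GET variant writes through the supplied user pointer.
 *
 *	unsigned int end;
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *	prctl(PR_GET_ENDIAN, (unsigned long)&end);
 */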
2025 
2026 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
2027 {
2028 	tsk->thread.align_ctl = val;
2029 	return 0;
2030 }
2031 
2032 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
2033 {
2034 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
2035 }
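
/*
 * Same pattern for alignment control (a sketch): PR_SET_UNALIGN takes
 * PR_UNALIGN_NOPRINT or PR_UNALIGN_SIGBUS, and the value stored in
 * thread.align_ctl is consulted by the alignment exception handler.
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);
 */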
2036 
2037 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
2038 				  unsigned long nbytes)
2039 {
2040 	unsigned long stack_page;
2041 	unsigned long cpu = task_cpu(p);
2042 
2043 	stack_page = (unsigned long)hardirq_ctx[cpu];
2044 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2045 		return 1;
2046 
2047 	stack_page = (unsigned long)softirq_ctx[cpu];
2048 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2049 		return 1;
2050 
2051 	return 0;
2052 }
2053 
2054 static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
2055 					unsigned long nbytes)
2056 {
2057 #ifdef CONFIG_PPC64
2058 	unsigned long stack_page;
2059 	unsigned long cpu = task_cpu(p);
2060 
2061 	stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
2062 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2063 		return 1;
2064 
2065 # ifdef CONFIG_PPC_BOOK3S_64
2066 	stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
2067 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2068 		return 1;
2069 
2070 	stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
2071 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2072 		return 1;
2073 # endif
2074 #endif
2075 
2076 	return 0;
2077 }
2078 
2079 
2080 int validate_sp(unsigned long sp, struct task_struct *p,
2081 		unsigned long nbytes)
2082 {
2083 	unsigned long stack_page = (unsigned long)task_stack_page(p);
2084 
2085 	if (sp < THREAD_SIZE)
2086 		return 0;
2087 
2088 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2089 		return 1;
2090 
2091 	if (valid_irq_stack(sp, p, nbytes))
2092 		return 1;
2093 
2094 	return valid_emergency_stack(sp, p, nbytes);
2095 }
2096 
2097 EXPORT_SYMBOL(validate_sp);
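
/*
 * Typical caller pattern (a sketch mirroring the stack walkers below):
 * validate each candidate frame before dereferencing it, then follow
 * the back chain stored in the first word of the frame.
 *
 *	while (validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
 *		sp = *(unsigned long *)sp;
 */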
2098 
2099 static unsigned long __get_wchan(struct task_struct *p)
2100 {
2101 	unsigned long ip, sp;
2102 	int count = 0;
2103 
2104 	if (!p || p == current || p->state == TASK_RUNNING)
2105 		return 0;
2106 
2107 	sp = p->thread.ksp;
2108 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
2109 		return 0;
2110 
2111 	do {
2112 		sp = *(unsigned long *)sp;
2113 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2114 		    p->state == TASK_RUNNING)
2115 			return 0;
2116 		if (count > 0) {
2117 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2118 			if (!in_sched_functions(ip))
2119 				return ip;
2120 		}
2121 	} while (count++ < 16);
2122 	return 0;
2123 }
2124 
2125 unsigned long get_wchan(struct task_struct *p)
2126 {
2127 	unsigned long ret;
2128 
2129 	if (!try_get_task_stack(p))
2130 		return 0;
2131 
2132 	ret = __get_wchan(p);
2133 
2134 	put_task_stack(p);
2135 
2136 	return ret;
2137 }
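
/*
 * Note: get_wchan() is the arch hook behind /proc/<pid>/wchan. The
 * try_get_task_stack()/put_task_stack() pair pins the stack so that
 * __get_wchan()'s walk cannot race with the task's stack being freed.
 */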
2138 
2139 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2140 
2141 void show_stack(struct task_struct *tsk, unsigned long *stack,
2142 		const char *loglvl)
2143 {
2144 	unsigned long sp, ip, lr, newsp;
2145 	int count = 0;
2146 	int firstframe = 1;
2147 	unsigned long ret_addr;
2148 	int ftrace_idx = 0;
2149 
2150 	if (tsk == NULL)
2151 		tsk = current;
2152 
2153 	if (!try_get_task_stack(tsk))
2154 		return;
2155 
2156 	sp = (unsigned long) stack;
2157 	if (sp == 0) {
2158 		if (tsk == current)
2159 			sp = current_stack_frame();
2160 		else
2161 			sp = tsk->thread.ksp;
2162 	}
2163 
2164 	lr = 0;
2165 	printk("%sCall Trace:\n", loglvl);
2166 	do {
2167 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2168 			break;
2169 
2170 		stack = (unsigned long *) sp;
2171 		newsp = stack[0];
2172 		ip = stack[STACK_FRAME_LR_SAVE];
2173 		if (!firstframe || ip != lr) {
2174 			printk("%s["REG"] ["REG"] %pS",
2175 				loglvl, sp, ip, (void *)ip);
2176 			ret_addr = ftrace_graph_ret_addr(current,
2177 						&ftrace_idx, ip, stack);
2178 			if (ret_addr != ip)
2179 				pr_cont(" (%pS)", (void *)ret_addr);
2180 			if (firstframe)
2181 				pr_cont(" (unreliable)");
2182 			pr_cont("\n");
2183 		}
2184 		firstframe = 0;
2185 
2186 		/*
2187 		 * See if this is an exception frame.
2188 		 * We look for the "regshere" marker in the current frame.
2189 		 */
2190 		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2191 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2192 			struct pt_regs *regs = (struct pt_regs *)
2193 				(sp + STACK_FRAME_OVERHEAD);
2194 
2195 			lr = regs->link;
2196 			printk("%s--- interrupt: %lx at %pS\n",
2197 			       loglvl, regs->trap, (void *)regs->nip);
2198 			__show_regs(regs);
2199 			printk("%s--- interrupt: %lx\n",
2200 			       loglvl, regs->trap);
2201 
2202 			firstframe = 1;
2203 		}
2204 
2205 		sp = newsp;
2206 	} while (count++ < kstack_depth_to_print);
2207 
2208 	put_task_stack(tsk);
2209 }
2210 
2211 #ifdef CONFIG_PPC64
2212 /* Called with hard IRQs off */
2213 void notrace __ppc64_runlatch_on(void)
2214 {
2215 	struct thread_info *ti = current_thread_info();
2216 
2217 	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2218 		/*
2219 		 * Least significant bit (RUN) is the only writable bit of
2220 		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
2221 		 * earliest ISA where this is the case, but it's convenient.
2222 		 */
2223 		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2224 	} else {
2225 		unsigned long ctrl;
2226 
2227 		/*
2228 		 * Some architectures (e.g., Cell) have writable fields other
2229 		 * than RUN, so do the read-modify-write.
2230 		 */
2231 		ctrl = mfspr(SPRN_CTRLF);
2232 		ctrl |= CTRL_RUNLATCH;
2233 		mtspr(SPRN_CTRLT, ctrl);
2234 	}
2235 
2236 	ti->local_flags |= _TLF_RUNLATCH;
2237 }
2238 
2239 /* Called with hard IRQs off */
2240 void notrace __ppc64_runlatch_off(void)
2241 {
2242 	struct thread_info *ti = current_thread_info();
2243 
2244 	ti->local_flags &= ~_TLF_RUNLATCH;
2245 
2246 	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2247 		mtspr(SPRN_CTRLT, 0);
2248 	} else {
2249 		unsigned long ctrl;
2250 
2251 		ctrl = mfspr(SPRN_CTRLF);
2252 		ctrl &= ~CTRL_RUNLATCH;
2253 		mtspr(SPRN_CTRLT, ctrl);
2254 	}
2255 }
2256 #endif /* CONFIG_PPC64 */
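
/*
 * Callers normally reach the two functions above via the
 * ppc64_runlatch_on()/ppc64_runlatch_off() wrappers in asm/runlatch.h,
 * which check CPU_FTR_CTRL and _TLF_RUNLATCH first so the SPR is only
 * written when the latch state actually needs to change (a summary;
 * see that header for the exact conditions).
 */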
2257 
2258 unsigned long arch_align_stack(unsigned long sp)
2259 {
2260 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2261 		sp -= get_random_int() & ~PAGE_MASK;
2262 	return sp & ~0xf;
2263 }
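
/*
 * Worked example, assuming 4K pages: get_random_int() & ~PAGE_MASK
 * yields an offset in [0, 4095], so the stack top moves down by at
 * most one page before "& ~0xf" re-imposes 16-byte alignment.
 */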
2264 
2265 static inline unsigned long brk_rnd(void)
2266 {
2267 	unsigned long rnd = 0;
2268 
2269 	/* 8MB for 32bit, 1GB for 64bit */
2270 	if (is_32bit_task())
2271 		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2272 	else
2273 		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2274 
2275 	return rnd << PAGE_SHIFT;
2276 }
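
/*
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12): a 32-bit task
 * draws rnd from [0, 2^11) pages, i.e. up to 8MB of brk offset, while
 * a 64-bit task draws from [0, 2^18) pages, i.e. up to 1GB.
 */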
2277 
2278 unsigned long arch_randomize_brk(struct mm_struct *mm)
2279 {
2280 	unsigned long base = mm->brk;
2281 	unsigned long ret;
2282 
2283 #ifdef CONFIG_PPC_BOOK3S_64
2284 	/*
2285 	 * If we are using 1TB segments and we are allowed to randomise
2286 	 * the heap, we can put it above 1TB so it is backed by a 1TB
2287 	 * segment.  Otherwise the heap will be in the bottom 1TB, which
2288 	 * always uses 256MB segments, and this may result in a
2289 	 * performance penalty.  Radix needs no special handling:
2290 	 * mmu_highuser_ssize stays at 256MB there, so the check below is false.
2291 	 */
2292 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2293 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2294 #endif
2295 
2296 	ret = PAGE_ALIGN(base + brk_rnd());
2297 
2298 	if (ret < mm->brk)
2299 		return mm->brk;
2300 
2301 	return ret;
2302 }
2303 
2304