xref: /linux/arch/powerpc/kernel/signal_32.c (revision a1ea0ca8a6f17d7b79bbc4d05dd4e6ca162d8f15)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
4  *
5  *  PowerPC version
6  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7  * Copyright (C) 2001 IBM
8  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
10  *
11  *  Derived from "arch/i386/kernel/signal.c"
12  *    Copyright (C) 1991, 1992 Linus Torvalds
13  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
14  */
15 
16 #include <linux/sched.h>
17 #include <linux/mm.h>
18 #include <linux/smp.h>
19 #include <linux/kernel.h>
20 #include <linux/signal.h>
21 #include <linux/errno.h>
22 #include <linux/elf.h>
23 #include <linux/ptrace.h>
24 #include <linux/pagemap.h>
25 #include <linux/ratelimit.h>
26 #include <linux/syscalls.h>
27 #ifdef CONFIG_PPC64
28 #include <linux/compat.h>
29 #else
30 #include <linux/wait.h>
31 #include <linux/unistd.h>
32 #include <linux/stddef.h>
33 #include <linux/tty.h>
34 #include <linux/binfmts.h>
35 #endif
36 
37 #include <linux/uaccess.h>
38 #include <asm/cacheflush.h>
39 #include <asm/syscalls.h>
40 #include <asm/sigcontext.h>
41 #include <asm/vdso.h>
42 #include <asm/switch_to.h>
43 #include <asm/tm.h>
44 #include <asm/asm-prototypes.h>
45 #ifdef CONFIG_PPC64
46 #include "ppc32.h"
47 #include <asm/unistd.h>
48 #else
49 #include <asm/ucontext.h>
50 #endif
51 
52 #include "signal.h"
53 
54 
#ifdef CONFIG_PPC64
/*
 * On a 64-bit kernel, 32-bit tasks use the compat structure layouts.
 * Alias the generic names to their 32-bit variants so the rest of this
 * file can be written once for both configurations.
 */
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

/* Never copy more than the 32-bit register image actually holds */
#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
#define unsafe_put_sigset_t	unsafe_put_compat_sigset
#define unsafe_get_sigset_t	unsafe_get_compat_sigset

/* User pointers are 32 bits wide in the compat ABI */
#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)
90 
91 static __always_inline int
92 __unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
93 {
94 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
95 	int val, i;
96 
97 	for (i = 0; i <= PT_RESULT; i ++) {
98 		/* Force usr to alway see softe as 1 (interrupts enabled) */
99 		if (i == PT_SOFTE)
100 			val = 1;
101 		else
102 			val = gregs[i];
103 
104 		unsafe_put_user(val, &frame->mc_gregs[i], failed);
105 	}
106 	return 0;
107 
108 failed:
109 	return 1;
110 }
111 
112 static __always_inline int
113 __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
114 {
115 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
116 	int i;
117 
118 	for (i = 0; i <= PT_RESULT; i++) {
119 		if ((i == PT_MSR) || (i == PT_SOFTE))
120 			continue;
121 		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
122 	}
123 	return 0;
124 
125 failed:
126 	return 1;
127 }
128 
#else /* CONFIG_PPC64 */

/* Native 32-bit: pt_regs itself is the user-visible register layout */
#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

/*
 * Copy a native sigset_t straight into the user frame.  Only usable
 * inside a user_access_begin() section; jumps to 'label' on fault.
 */
#define unsafe_put_sigset_t(uset, set, label) do {			\
	sigset_t __user *__us = uset	;				\
	const sigset_t *__s = set;					\
									\
	unsafe_copy_to_user(__us, __s, sizeof(*__us), label);		\
} while (0)

#define unsafe_get_sigset_t	unsafe_get_user_sigset

/* Kernel and user pointers are the same width here; plain casts suffice */
#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))
144 
/*
 * Bulk-copy the general purpose registers to the user mcontext.
 * Must run inside a user_access_begin() section.
 * Returns 0 on success, 1 on fault.
 */
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}
154 
/*
 * Restore the general purpose registers from the user mcontext, skipping
 * the kernel-controlled MSR word: one copy of everything below MSR, then
 * one copy from orig_r3 to the end.  Must run inside a
 * user_read_access_begin() section.  Returns 0 on success, 1 on fault.
 */
static __always_inline
int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);

	/* copy from orig_r3 (the word after the MSR) up to the end */
	unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			      GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);

	return 0;

failed:
	return 1;
}
170 #endif
171 
/* goto-on-failure wrappers so callers use these like other unsafe_*() helpers */
#define unsafe_save_general_regs(regs, frame, label) do {	\
	if (__unsafe_save_general_regs(regs, frame))		\
		goto label;					\
} while (0)

#define unsafe_restore_general_regs(regs, frame, label) do {	\
	if (__unsafe_restore_general_regs(regs, frame))		\
		goto label;					\
} while (0)
181 
182 /*
183  * When we have signals to deliver, we set up on the
184  * user stack, going down from the original stack pointer:
185  *	an ABI gap of 56 words
186  *	an mcontext struct
187  *	a sigcontext struct
188  *	a gap of __SIGNAL_FRAMESIZE bytes
189  *
190  * Each of these things must be a multiple of 16 bytes in size. The following
191  * structure represent all of this except the __SIGNAL_FRAMESIZE gap
192  *
193  */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;	/* second sigcontext for TM */
	struct mcontext	mctx_transact;	/* transactional register values */
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
207 
208 /*
209  *  When we have rt signals to deliver, we set up on the
210  *  user stack, going down from the original stack pointer:
211  *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
212  *	a gap of __SIGNAL_FRAMESIZE+16 bytes
213  *  (the +16 is to get the siginfo and ucontext in the same
214  *  positions as in older kernels).
215  *
216  *  Each of these things must be a multiple of 16 bytes in size.
217  *
218  */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;		/* 32-bit siginfo layout for compat tasks */
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;	/* transactional state, reached via uc.uc_link */
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
235 
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 *
 * prepare_save_user_regs() flushes live register state out of the CPU
 * into current->thread so it can be copied to the signal frame.  It may
 * fault/sleep, so it must run before user_access_begin().
 *
 * @ctx_has_vsx_region: non-zero when the destination context has room
 *	for the VSX doubleword area, so VSX state must be flushed too.
 */
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	/* VRSAVE lives in an SPR; snapshot it into the thread struct */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}
260 
/*
 * Write the user register state to @frame.  The caller must already have
 * called prepare_save_user_regs() and entered a user_access_begin()
 * section covering @frame (and @tm_frame when non-NULL).  The MSR word
 * stored in the frame carries MSR_VEC/MSR_VSX/MSR_SPE bits telling
 * sigreturn which optional register areas hold valid data.
 * Returns 0 on success, 1 on fault.
 */
static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
				   struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* save general registers */
	unsafe_save_general_regs(regs, frame, failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
			failed);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	/* We need to write 0 the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame)
		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
337 
/* goto-on-failure wrapper around __unsafe_save_user_regs() */
#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx))	\
		goto label;						\
} while (0)
342 
343 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 *
 * prepare_save_tm_user_regs() is the TM counterpart of
 * prepare_save_user_regs(): it snapshots state that may fault/sleep to
 * fetch, before the caller enters user_access_begin().
 */
static void prepare_save_tm_user_regs(void)
{
	/* A TM signal frame should never be built with suspend disabled */
	WARN_ON(tm_suspend_disabled);

#ifdef CONFIG_ALTIVEC
	/* checkpointed VRSAVE lives in the SPR at this point */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}
366 
/*
 * Write both register sets for a task that took a signal mid-transaction:
 * the checkpointed state to @frame and the transactional state to
 * @tm_frame.  @msr is the task's MSR as sampled before
 * get_tm_stackpointer() could change it; its MSR_FP/VEC/VSX bits select
 * whether the live or checkpointed copy of each register class goes into
 * the transactional frame.  Must be called inside user_access_begin().
 * Returns 0 on success, 1 on fault.
 */
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	/* Save both sets of general registers */
	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
	unsafe_save_general_regs(regs, tm_frame, failed);

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				    ELF_NVRREG * sizeof(vector128), failed);
		if (msr & MSR_VEC)
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.vr_state,
					    ELF_NVRREG * sizeof(vector128), failed);
		else
			unsafe_copy_to_user(&tm_frame->mc_vregs,
					    &current->thread.ckvr_state,
					    ELF_NVRREG * sizeof(vector128), failed);

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	unsafe_put_user(current->thread.ckvrsave,
			(u32 __user *)&frame->mc_vregs[32], failed);
	if (msr & MSR_VEC)
		unsafe_put_user(current->thread.vrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
	else
		unsafe_put_user(current->thread.ckvrsave,
				(u32 __user *)&tm_frame->mc_vregs[32], failed);
#endif /* CONFIG_ALTIVEC */

	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
	if (msr & MSR_FP)
		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
	else
		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
		if (msr & MSR_VSX)
			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
		else
			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in __unsafe_save_user_regs().
	 */
	if (current->thread.used_spe) {
		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
				    ELF_NEVRREG * sizeof(u32), failed);
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	unsafe_put_user(current->thread.spefscr,
			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);

	return 0;

failed:
	return 1;
}
464 #else
/* !CONFIG_PPC_TRANSACTIONAL_MEM: no TM state to prepare or save */
static void prepare_save_tm_user_regs(void) { }

static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
472 #endif
473 
/* goto-on-failure wrapper around save_tm_user_regs_unsafe() */
#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
		goto label;						\
} while (0)
478 
/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 *
 * @sr:  user pointer to the saved mcontext.
 * @sig: non-zero when returning from a signal handler: also restore the
 *	 saved little-endian mode; zero (swapcontext path) additionally
 *	 keeps the current r2 (TLS pointer) intact.
 *
 * Returns 0 on success, 1 on fault or if user access cannot be opened.
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	unsafe_restore_general_regs(regs, sr, failed);
	/* don't allow the interrupted syscall to be restarted afterwards */
	set_trap_norestart(regs);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instruction.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
				      ELF_NEVRREG * sizeof(u32), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();
	return 0;

failed:
	user_read_access_end();
	return 1;
}
578 
579 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 *
 * @sr:    user mcontext holding the checkpointed register state.
 * @tm_sr: user mcontext holding the transactional register state.
 *
 * Returns 0 on success, 1 on fault or invalid context.  On success the
 * task has been recheckpointed and resumes its transaction.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	if (!user_read_access_begin(sr, sizeof(*sr)))
		return 1;

	unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
	unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.ckvrsave,
			(u32 __user *)&sr->mc_vregs[32], failed);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
				      ELF_NEVRREG * sizeof(u32), failed);
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	unsafe_get_user(current->thread.spefscr,
			(u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */

	user_read_access_end();

	/* Second pass: the transactional context */
	if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
		return 1;

	unsafe_restore_general_regs(regs, tm_sr, failed);

#ifdef CONFIG_ALTIVEC
	/* restore altivec registers from the stack */
	/* NOTE(review): size taken from sr->mc_vregs, which has the same
	 * layout as tm_sr->mc_vregs — confirm intentional */
	if (msr & MSR_VEC)
		unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
				      sizeof(sr->mc_vregs), failed);

	/* Always get VRSAVE back */
	unsafe_get_user(current->thread.vrsave,
			(u32 __user *)&tm_sr->mc_vregs[32], failed);
#endif /* CONFIG_ALTIVEC */

	unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);

#ifdef CONFIG_VSX
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
		current->thread.used_vsr = true;
	}
#endif /* CONFIG_VSX */

	/* Get the top half of the MSR from the user context */
	unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
	msr_hi <<= 32;

	user_read_access_end();

	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;

failed:
	user_read_access_end();
	return 1;
}
760 #else
/* !CONFIG_PPC_TRANSACTIONAL_MEM: no TM state to restore; always succeeds */
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	return 0;
}
766 #endif
767 
#ifdef CONFIG_PPC64

/* Compat tasks need the 32-bit siginfo layout */
#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */
773 
/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 *
 * Builds a struct rt_sigframe on @tsk's user stack, fills in the
 * siginfo and ucontext(s), selects the sigreturn trampoline (vDSO if
 * mapped, otherwise instructions written into the frame) and redirects
 * the task's registers to enter the handler on return to userspace.
 * Returns 0 on success, 1 on failure (the caller then raises SIGSEGV).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	/* Flush live register state before opening the user access window */
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		/* link the transactional ucontext so sigreturn can find it */
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Set up the sigreturn trampoline: vDSO if mapped, else on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		/* make the stored instructions visible to instruction fetch */
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	/* the handler returns through the trampoline in LR */
	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}
873 
/*
 * OK, we're invoking a handler
 *
 * Non-RT flavour: builds the older struct sigframe (sigcontext +
 * mcontext) on the user stack, sets up the sigreturn trampoline and
 * enters the handler.  Returns 0 on success, 1 on failure (the caller
 * then raises SIGSEGV).
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	/* Flush live register state before opening the user access window */
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
	/* high 32 bits of the 64-bit sigset word go in _unused[3] */
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	/* Set up the sigreturn trampoline: vDSO if mapped, else on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
		/* make the stored instructions visible to instruction fetch */
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	user_access_end();

	/* the handler returns through the trampoline in LR */
	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}
961 
/*
 * Install the blocked-signal set and register state from user context
 * @ucp.  @sig is passed through to restore_user_regs(): non-zero on the
 * sigreturn path (also restores the saved endian mode), zero for
 * swapcontext (keeps r2/TLS).  Returns 0 on success, -EFAULT on any
 * user-access failure.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		/* compat: uc_regs holds a 32-bit user pointer */
		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
		mcp = (struct mcontext __user *)(u64)cmcp;
	}
#else
	unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
	user_read_access_end();

	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
993 
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * TM variant of do_setcontext(): restore both the checkpointed context
 * (@ucp) and the transactional context (@tm_ucp) so the transaction can
 * be recheckpointed on return to userspace.
 *
 * Returns 0 on success, -EFAULT on any failed userspace access.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
	unsafe_get_user(cmcp, &ucp->uc_regs, failed);

	user_read_access_end();

	/* uc_regs of the transactional ucontext, fetched outside the window */
	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	/* Both are 32-bit compat user pointers; widen to kernel addresses. */
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	/* Install the restored signal mask before touching the regs. */
	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
#endif
1030 
/*
 * sys_swapcontext(): save the current user context into @old_ctx (if
 * non-NULL) and install @new_ctx (if non-NULL), in that order.
 * @ctx_size lets userspace pass a ucontext that predates the VSX
 * extension (see UCONTEXTSIZEWITHOUTVSX); a context too small to hold
 * the state it claims to carry is rejected with -EINVAL.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* Peek at the new MSR to validate VSX sizing below. */
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		/* Flush live register state to thread_struct before saving. */
		prepare_save_user_regs(ctx_has_vsx_region);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;
		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	/* Pre-fault the whole new context so the restore below can't stall. */
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;

failed:
	user_write_access_end();
	return -EFAULT;
}
1130 
/*
 * sys_rt_sigreturn(): tear down the rt signal frame set up at delivery
 * time and restore the interrupted user context (including, with TM,
 * any transactional state stashed in uc_link).
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* The frame sits just above the caller's stack frame + 16-byte gap. */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended, as we can never be
	 * transactionally active in the kernel; if somehow we are, there
	 * is nothing better to do than go ahead and take the Bad Thing
	 * later.
	 * The reclaim cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	/* A non-NULL uc_link points at the transactional ucontext. */
	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're
			 * transactional.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad thing at RFID
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}
1231 
#ifdef CONFIG_PPC32
/*
 * sys_debug_setcontext(): like swapcontext() but first applies @ndbg
 * debug-register operations from @dbg (single-stepping / branch
 * tracing), then installs the user context @ctx.  The whole operation
 * list is validated before any state is modified.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	/* First pass: compute the new MSR/DBCR0 without touching anything. */
	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				/*
				 * Only clear MSR_DE/DBCR0_IDM if no other
				 * debug event remains armed in DBCR0/DBCR1.
				 */
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* BookE debug hardware has no branch-trace mode */
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	/* Pre-fault the whole context so the restore below can't stall. */
	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
1330 
1331 /*
1332  * Do a signal return; undo the signal stack.
1333  */
1334 #ifdef CONFIG_PPC64
1335 COMPAT_SYSCALL_DEFINE0(sigreturn)
1336 #else
1337 SYSCALL_DEFINE0(sigreturn)
1338 #endif
1339 {
1340 	struct pt_regs *regs = current_pt_regs();
1341 	struct sigframe __user *sf;
1342 	struct sigcontext __user *sc;
1343 	struct sigcontext sigctx;
1344 	struct mcontext __user *sr;
1345 	sigset_t set;
1346 	struct mcontext __user *mcp;
1347 	struct mcontext __user *tm_mcp = NULL;
1348 	unsigned long long msr_hi = 0;
1349 
1350 	/* Always make any pending restarted system calls return -EINTR */
1351 	current->restart_block.fn = do_no_restart_syscall;
1352 
1353 	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1354 	sc = &sf->sctx;
1355 	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1356 		goto badframe;
1357 
1358 #ifdef CONFIG_PPC64
1359 	/*
1360 	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1361 	 * unused part of the signal stackframe
1362 	 */
1363 	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1364 #else
1365 	set.sig[0] = sigctx.oldmask;
1366 	set.sig[1] = sigctx._unused[3];
1367 #endif
1368 	set_current_blocked(&set);
1369 
1370 	mcp = (struct mcontext __user *)&sf->mctx;
1371 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1372 	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1373 	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1374 		goto badframe;
1375 #endif
1376 	if (MSR_TM_ACTIVE(msr_hi<<32)) {
1377 		if (!cpu_has_feature(CPU_FTR_TM))
1378 			goto badframe;
1379 		if (restore_tm_user_regs(regs, mcp, tm_mcp))
1380 			goto badframe;
1381 	} else {
1382 		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1383 		if (restore_user_regs(regs, sr, 1)) {
1384 			signal_fault(current, regs, "sys_sigreturn", sr);
1385 
1386 			force_sig(SIGSEGV);
1387 			return 0;
1388 		}
1389 	}
1390 
1391 	set_thread_flag(TIF_RESTOREALL);
1392 	return 0;
1393 
1394 badframe:
1395 	signal_fault(current, regs, "sys_sigreturn", sc);
1396 
1397 	force_sig(SIGSEGV);
1398 	return 0;
1399 }
1400