xref: /linux/arch/powerpc/kernel/signal_32.c (revision cdd5b5a9761fd66d17586e4f4ba6588c70e640ea)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
281e7009eSStephen Rothwell /*
381e7009eSStephen Rothwell  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
481e7009eSStephen Rothwell  *
581e7009eSStephen Rothwell  *  PowerPC version
681e7009eSStephen Rothwell  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
781e7009eSStephen Rothwell  * Copyright (C) 2001 IBM
881e7009eSStephen Rothwell  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
981e7009eSStephen Rothwell  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
1081e7009eSStephen Rothwell  *
1181e7009eSStephen Rothwell  *  Derived from "arch/i386/kernel/signal.c"
1281e7009eSStephen Rothwell  *    Copyright (C) 1991, 1992 Linus Torvalds
1381e7009eSStephen Rothwell  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
1481e7009eSStephen Rothwell  */
1581e7009eSStephen Rothwell 
1681e7009eSStephen Rothwell #include <linux/sched.h>
1781e7009eSStephen Rothwell #include <linux/mm.h>
1881e7009eSStephen Rothwell #include <linux/smp.h>
1981e7009eSStephen Rothwell #include <linux/kernel.h>
2081e7009eSStephen Rothwell #include <linux/signal.h>
2181e7009eSStephen Rothwell #include <linux/errno.h>
2281e7009eSStephen Rothwell #include <linux/elf.h>
2305ead015SLucas Woods #include <linux/ptrace.h>
2456b04d56SChristophe Leroy #include <linux/pagemap.h>
2576462232SChristian Dietrich #include <linux/ratelimit.h>
2681e7009eSStephen Rothwell #include <linux/syscalls.h>
27f3675644SAl Viro #ifdef CONFIG_PPC64
2881e7009eSStephen Rothwell #include <linux/compat.h>
2981e7009eSStephen Rothwell #else
3081e7009eSStephen Rothwell #include <linux/wait.h>
3181e7009eSStephen Rothwell #include <linux/unistd.h>
3281e7009eSStephen Rothwell #include <linux/stddef.h>
3381e7009eSStephen Rothwell #include <linux/tty.h>
3481e7009eSStephen Rothwell #include <linux/binfmts.h>
3581e7009eSStephen Rothwell #endif
3681e7009eSStephen Rothwell 
377c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
3881e7009eSStephen Rothwell #include <asm/cacheflush.h>
39a7f31841SArnd Bergmann #include <asm/syscalls.h>
40c5ff7001SDavid Gibson #include <asm/sigcontext.h>
41a7f290daSBenjamin Herrenschmidt #include <asm/vdso.h>
42ae3a197eSDavid Howells #include <asm/switch_to.h>
432b0a576dSMichael Neuling #include <asm/tm.h>
440545d543SDaniel Axtens #include <asm/asm-prototypes.h>
4581e7009eSStephen Rothwell #ifdef CONFIG_PPC64
468cd1def4SRohan McLure #include <asm/syscalls_32.h>
4781e7009eSStephen Rothwell #include <asm/unistd.h>
4881e7009eSStephen Rothwell #else
4981e7009eSStephen Rothwell #include <asm/ucontext.h>
5081e7009eSStephen Rothwell #endif
5181e7009eSStephen Rothwell 
5222e38f29SBenjamin Herrenschmidt #include "signal.h"
5322e38f29SBenjamin Herrenschmidt 
5481e7009eSStephen Rothwell 
5581e7009eSStephen Rothwell #ifdef CONFIG_PPC64
5681e7009eSStephen Rothwell #define old_sigaction	old_sigaction32
5781e7009eSStephen Rothwell #define sigcontext	sigcontext32
5881e7009eSStephen Rothwell #define mcontext	mcontext32
5981e7009eSStephen Rothwell #define ucontext	ucontext32
6081e7009eSStephen Rothwell 
6181e7009eSStephen Rothwell /*
62c1cb299eSMichael Neuling  * Userspace code may pass a ucontext which doesn't include VSX added
63c1cb299eSMichael Neuling  * at the end.  We need to check for this case.
64c1cb299eSMichael Neuling  */
65c1cb299eSMichael Neuling #define UCONTEXTSIZEWITHOUTVSX \
66c1cb299eSMichael Neuling 		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
67c1cb299eSMichael Neuling 
68c1cb299eSMichael Neuling /*
6981e7009eSStephen Rothwell  * Returning 0 means we return to userspace via
7081e7009eSStephen Rothwell  * ret_from_except and thus restore all user
7181e7009eSStephen Rothwell  * registers from *regs.  This is what we need
7281e7009eSStephen Rothwell  * to do when a signal has been delivered.
7381e7009eSStephen Rothwell  */
7481e7009eSStephen Rothwell 
7581e7009eSStephen Rothwell #define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
7681e7009eSStephen Rothwell #undef __SIGNAL_FRAMESIZE
7781e7009eSStephen Rothwell #define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
7881e7009eSStephen Rothwell #undef ELF_NVRREG
7981e7009eSStephen Rothwell #define ELF_NVRREG	ELF_NVRREG32
8081e7009eSStephen Rothwell 
8181e7009eSStephen Rothwell /*
8281e7009eSStephen Rothwell  * Functions for flipping sigsets (thanks to brain dead generic
8381e7009eSStephen Rothwell  * implementation that makes things simple for little endian only)
8481e7009eSStephen Rothwell  */
85de781ebdSChristophe Leroy #define unsafe_put_sigset_t	unsafe_put_compat_sigset
86887f3cebSChristophe Leroy #define unsafe_get_sigset_t	unsafe_get_compat_sigset
8781e7009eSStephen Rothwell 
8829e646dfSAl Viro #define to_user_ptr(p)		ptr_to_compat(p)
8981e7009eSStephen Rothwell #define from_user_ptr(p)	compat_ptr(p)
9081e7009eSStephen Rothwell 
/*
 * Copy the 32-bit view of the general purpose registers from *regs into the
 * user-space mcontext, one word at a time.  The PT_SOFTE word is never taken
 * from the real regs: user space always sees soft-enable as 1.
 * Uses unsafe_put_user(), so the caller must have opened user write access.
 * Returns 0 on success, 1 if a user access faulted.
 */
91ef75e731SChristophe Leroy static __always_inline int
__unsafe_save_general_regs(struct pt_regs * regs,struct mcontext __user * frame)92f918a81eSChristophe Leroy __unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
9381e7009eSStephen Rothwell {
9481e7009eSStephen Rothwell 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
95020c4831SChristophe Leroy 	int val, i;
9681e7009eSStephen Rothwell 
97401d1f02SDavid Woodhouse 	for (i = 0; i <= PT_RESULT; i ++) {
98020c4831SChristophe Leroy 		/* Force user to always see softe as 1 (interrupts enabled) */
99020c4831SChristophe Leroy 		if (i == PT_SOFTE)
100020c4831SChristophe Leroy 			val = 1;
101a8a4b03aSMadhavan Srinivasan 		else
102020c4831SChristophe Leroy 			val = gregs[i];
103020c4831SChristophe Leroy 
104ef75e731SChristophe Leroy 		unsafe_put_user(val, &frame->mc_gregs[i], failed);
105401d1f02SDavid Woodhouse 	}
10681e7009eSStephen Rothwell 	return 0;
107ef75e731SChristophe Leroy 
108ef75e731SChristophe Leroy failed:
109ef75e731SChristophe Leroy 	return 1;
11081e7009eSStephen Rothwell }
11181e7009eSStephen Rothwell 
/*
 * Read the general purpose registers back from the user mcontext into *regs,
 * skipping the PT_MSR and PT_SOFTE words, which user space must not control.
 * Uses unsafe_get_user(), so the caller must have opened user read access.
 * Returns 0 on success, 1 if a user access faulted.
 */
112627b72beSChristophe Leroy static __always_inline int
__unsafe_restore_general_regs(struct pt_regs * regs,struct mcontext __user * sr)113627b72beSChristophe Leroy __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
11481e7009eSStephen Rothwell {
11581e7009eSStephen Rothwell 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
11681e7009eSStephen Rothwell 	int i;
11781e7009eSStephen Rothwell 
11881e7009eSStephen Rothwell 	for (i = 0; i <= PT_RESULT; i++) {
11981e7009eSStephen Rothwell 		if ((i == PT_MSR) || (i == PT_SOFTE))
12081e7009eSStephen Rothwell 			continue;
121627b72beSChristophe Leroy 		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
12281e7009eSStephen Rothwell 	}
12381e7009eSStephen Rothwell 	return 0;
124627b72beSChristophe Leroy 
125627b72beSChristophe Leroy failed:
126627b72beSChristophe Leroy 	return 1;
12781e7009eSStephen Rothwell }
12881e7009eSStephen Rothwell 
12981e7009eSStephen Rothwell #else /* CONFIG_PPC64 */
13081e7009eSStephen Rothwell 
13181e7009eSStephen Rothwell #define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
13281e7009eSStephen Rothwell 
133de781ebdSChristophe Leroy #define unsafe_put_sigset_t(uset, set, label) do {			\
134de781ebdSChristophe Leroy 	sigset_t __user *__us = uset	;				\
135de781ebdSChristophe Leroy 	const sigset_t *__s = set;					\
136de781ebdSChristophe Leroy 									\
137de781ebdSChristophe Leroy 	unsafe_copy_to_user(__us, __s, sizeof(*__us), label);		\
138de781ebdSChristophe Leroy } while (0)
139de781ebdSChristophe Leroy 
140887f3cebSChristophe Leroy #define unsafe_get_sigset_t	unsafe_get_user_sigset
14181e7009eSStephen Rothwell 
14229e646dfSAl Viro #define to_user_ptr(p)		((unsigned long)(p))
14329e646dfSAl Viro #define from_user_ptr(p)	((void __user *)(p))
14481e7009eSStephen Rothwell 
/*
 * 32-bit native variant: the register block is copied to the user mcontext
 * wholesale (GP_REGS_SIZE bytes) with a single unsafe_copy_to_user().
 * Returns 0 on success, 1 if the copy faulted.
 */
145ef75e731SChristophe Leroy static __always_inline int
__unsafe_save_general_regs(struct pt_regs * regs,struct mcontext __user * frame)146f918a81eSChristophe Leroy __unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
14781e7009eSStephen Rothwell {
148ef75e731SChristophe Leroy 	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
149ef75e731SChristophe Leroy 	return 0;
150ef75e731SChristophe Leroy 
151ef75e731SChristophe Leroy failed:
152ef75e731SChristophe Leroy 	return 1;
15381e7009eSStephen Rothwell }
15481e7009eSStephen Rothwell 
/*
 * 32-bit native variant: bulk-copy the registers back from the user mcontext
 * in two pieces so the MSR word is skipped (everything before PT_MSR, then
 * everything from PT_ORIG_R3, the word after MSR, to the end).
 * Returns 0 on success, 1 if a copy faulted.
 */
155627b72beSChristophe Leroy static __always_inline
__unsafe_restore_general_regs(struct pt_regs * regs,struct mcontext __user * sr)156627b72beSChristophe Leroy int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
15781e7009eSStephen Rothwell {
15881e7009eSStephen Rothwell 	/* copy up to but not including MSR */
159627b72beSChristophe Leroy 	unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);
160627b72beSChristophe Leroy 
16181e7009eSStephen Rothwell 	/* copy from orig_r3 (the word after the MSR) up to the end */
162627b72beSChristophe Leroy 	unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
163627b72beSChristophe Leroy 			      GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);
164627b72beSChristophe Leroy 
16581e7009eSStephen Rothwell 	return 0;
166627b72beSChristophe Leroy 
167627b72beSChristophe Leroy failed:
168627b72beSChristophe Leroy 	return 1;
16981e7009eSStephen Rothwell }
17081e7009eSStephen Rothwell #endif
17181e7009eSStephen Rothwell 
/*
 * Wrappers giving the helpers above the usual unsafe_*(..., label) calling
 * convention: on a user-access fault, jump to 'label' instead of returning 1.
 */
172ef75e731SChristophe Leroy #define unsafe_save_general_regs(regs, frame, label) do {	\
173f918a81eSChristophe Leroy 	if (__unsafe_save_general_regs(regs, frame))		\
174ef75e731SChristophe Leroy 		goto label;					\
175ef75e731SChristophe Leroy } while (0)
176ef75e731SChristophe Leroy 
177627b72beSChristophe Leroy #define unsafe_restore_general_regs(regs, frame, label) do {	\
178627b72beSChristophe Leroy 	if (__unsafe_restore_general_regs(regs, frame))		\
17981e7009eSStephen Rothwell 		goto label;					\
18081e7009eSStephen Rothwell } while (0)
18181e7009eSStephen Rothwell 
18281e7009eSStephen Rothwell /*
18381e7009eSStephen Rothwell  * When we have signals to deliver, we set up on the
18481e7009eSStephen Rothwell  * user stack, going down from the original stack pointer:
185a3f61dc0SBenjamin Herrenschmidt  *	an ABI gap of 56 words
186a3f61dc0SBenjamin Herrenschmidt  *	an mcontext struct
18781e7009eSStephen Rothwell  *	a sigcontext struct
18881e7009eSStephen Rothwell  *	a gap of __SIGNAL_FRAMESIZE bytes
18981e7009eSStephen Rothwell  *
190a3f61dc0SBenjamin Herrenschmidt  * Each of these things must be a multiple of 16 bytes in size. The following
191a3f61dc0SBenjamin Herrenschmidt  * structure represents all of this except the __SIGNAL_FRAMESIZE gap
19281e7009eSStephen Rothwell  *
19381e7009eSStephen Rothwell  */
194a3f61dc0SBenjamin Herrenschmidt struct sigframe {
195a3f61dc0SBenjamin Herrenschmidt 	struct sigcontext sctx;		/* the sigcontext */
19681e7009eSStephen Rothwell 	struct mcontext	mctx;		/* all the register values */
1972b0a576dSMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1982b0a576dSMichael Neuling 	struct sigcontext sctx_transact;	/* second context pair for the */
1992b0a576dSMichael Neuling 	struct mcontext	mctx_transact;		/* transactional register state */
2002b0a576dSMichael Neuling #endif
20181e7009eSStephen Rothwell 	/*
20281e7009eSStephen Rothwell 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
20381e7009eSStephen Rothwell 	 * regs and 18 fp regs below sp before decrementing it.
20481e7009eSStephen Rothwell 	 */
20581e7009eSStephen Rothwell 	int			abigap[56];
20681e7009eSStephen Rothwell };
20781e7009eSStephen Rothwell 
20881e7009eSStephen Rothwell /*
20981e7009eSStephen Rothwell  *  When we have rt signals to deliver, we set up on the
21081e7009eSStephen Rothwell  *  user stack, going down from the original stack pointer:
21181e7009eSStephen Rothwell  *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
21281e7009eSStephen Rothwell  *	a gap of __SIGNAL_FRAMESIZE+16 bytes
21381e7009eSStephen Rothwell  *  (the +16 is to get the siginfo and ucontext in the same
21481e7009eSStephen Rothwell  *  positions as in older kernels).
21581e7009eSStephen Rothwell  *
21681e7009eSStephen Rothwell  *  Each of these things must be a multiple of 16 bytes in size.
21781e7009eSStephen Rothwell  *
21881e7009eSStephen Rothwell  */
21981e7009eSStephen Rothwell struct rt_sigframe {
22081e7009eSStephen Rothwell #ifdef CONFIG_PPC64
22181e7009eSStephen Rothwell 	compat_siginfo_t info;		/* 32bit siginfo layout for compat tasks */
22281e7009eSStephen Rothwell #else
22381e7009eSStephen Rothwell 	struct siginfo info;
22481e7009eSStephen Rothwell #endif
22581e7009eSStephen Rothwell 	struct ucontext	uc;
2262b0a576dSMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2272b0a576dSMichael Neuling 	struct ucontext	uc_transact;	/* second ucontext for transactional state */
2282b0a576dSMichael Neuling #endif
22981e7009eSStephen Rothwell 	/*
23081e7009eSStephen Rothwell 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
23181e7009eSStephen Rothwell 	 * regs and 18 fp regs below sp before decrementing it.
23281e7009eSStephen Rothwell 	 */
23381e7009eSStephen Rothwell 	int			abigap[56];
23481e7009eSStephen Rothwell };
23581e7009eSStephen Rothwell 
/*
 * Minimum user stack space a 32-bit signal frame can consume: the larger of
 * the rt and non-rt layouts, each including its gap of __SIGNAL_FRAMESIZE
 * bytes (plus 16 for rt frames, see the comment above struct rt_sigframe).
 */
get_min_sigframe_size_32(void)2362896b2dfSNicholas Piggin unsigned long get_min_sigframe_size_32(void)
2372896b2dfSNicholas Piggin {
2382896b2dfSNicholas Piggin 	return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
2392896b2dfSNicholas Piggin 		   sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
2402896b2dfSNicholas Piggin }
2412896b2dfSNicholas Piggin 
24281e7009eSStephen Rothwell /*
24381e7009eSStephen Rothwell  * Save the current user registers on the user stack.
24481e7009eSStephen Rothwell  * We only save the altivec/spe registers if the process has used
24581e7009eSStephen Rothwell  * altivec/spe instructions at some point.
24681e7009eSStephen Rothwell  */
/*
 * Flush any live FP/Altivec/VSX/SPE register state out of the CPU into
 * current->thread so that the subsequent copy to the user frame sees
 * up-to-date values.  VSX is only flushed when the destination context
 * actually has room for the VSX region (@ctx_has_vsx_region).  The current
 * VRSAVE SPR is also snapshotted into the thread struct.
 */
prepare_save_user_regs(int ctx_has_vsx_region)247968c4fccSChristophe Leroy static void prepare_save_user_regs(int ctx_has_vsx_region)
248968c4fccSChristophe Leroy {
249968c4fccSChristophe Leroy 	/* Make sure floating point registers are stored in regs */
250968c4fccSChristophe Leroy 	flush_fp_to_thread(current);
251968c4fccSChristophe Leroy #ifdef CONFIG_ALTIVEC
252968c4fccSChristophe Leroy 	if (current->thread.used_vr)
253968c4fccSChristophe Leroy 		flush_altivec_to_thread(current);
254968c4fccSChristophe Leroy 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
255968c4fccSChristophe Leroy 		current->thread.vrsave = mfspr(SPRN_VRSAVE);
256968c4fccSChristophe Leroy #endif
257968c4fccSChristophe Leroy #ifdef CONFIG_VSX
258968c4fccSChristophe Leroy 	if (current->thread.used_vsr && ctx_has_vsx_region)
259968c4fccSChristophe Leroy 		flush_vsx_to_thread(current);
260968c4fccSChristophe Leroy #endif
261968c4fccSChristophe Leroy #ifdef CONFIG_SPE
262968c4fccSChristophe Leroy 	if (current->thread.used_spe)
263968c4fccSChristophe Leroy 		flush_spe_to_thread(current);
264968c4fccSChristophe Leroy #endif
265968c4fccSChristophe Leroy }
266968c4fccSChristophe Leroy 
/*
 * Write the (non-transactional) register state into the user frame.  The MSR
 * value saved into the frame is adjusted so that MSR_VEC/MSR_VSX/MSR_SPE
 * report which optional register blocks in the frame hold valid data.  When
 * @tm_frame is non-NULL, its PT_MSR word is zeroed so sigreturn can tell no
 * transactional state was saved.  All accesses are unsafe_*, so the caller
 * must have opened user write access.  Returns 0 on success, 1 on fault.
 */
267*a03b1a0bSChristophe Leroy static __always_inline int
__unsafe_save_user_regs(struct pt_regs * regs,struct mcontext __user * frame,struct mcontext __user * tm_frame,int ctx_has_vsx_region)268*a03b1a0bSChristophe Leroy __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
2698d33001dSChristophe Leroy 			struct mcontext __user *tm_frame, int ctx_has_vsx_region)
27081e7009eSStephen Rothwell {
2719e751186SMichael Neuling 	unsigned long msr = regs->msr;
2729e751186SMichael Neuling 
273c6e6771bSMichael Neuling 	/* save general registers */
274ef75e731SChristophe Leroy 	unsafe_save_general_regs(regs, frame, failed);
27581e7009eSStephen Rothwell 
27681e7009eSStephen Rothwell #ifdef CONFIG_ALTIVEC
27781e7009eSStephen Rothwell 	/* save altivec registers */
27881e7009eSStephen Rothwell 	if (current->thread.used_vr) {
279ef75e731SChristophe Leroy 		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
280ef75e731SChristophe Leroy 				    ELF_NVRREG * sizeof(vector128), failed);
28181e7009eSStephen Rothwell 		/* set MSR_VEC in the saved MSR value to indicate that
28281e7009eSStephen Rothwell 		   frame->mc_vregs contains valid data */
2839e751186SMichael Neuling 		msr |= MSR_VEC;
28481e7009eSStephen Rothwell 	}
28581e7009eSStephen Rothwell 	/* else assert((regs->msr & MSR_VEC) == 0) */
28681e7009eSStephen Rothwell 
28781e7009eSStephen Rothwell 	/* We always copy to/from vrsave, it's 0 if we don't have or don't
28881e7009eSStephen Rothwell 	 * use altivec. Since VSCR only contains 32 bits saved in the least
28981e7009eSStephen Rothwell 	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
29081e7009eSStephen Rothwell 	 * most significant bits of that same vector. --BenH
291408a7e08SPaul Mackerras 	 * Note that the current VRSAVE value is in the SPR at this point.
29281e7009eSStephen Rothwell 	 */
293ef75e731SChristophe Leroy 	unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
294ef75e731SChristophe Leroy 			failed);
29581e7009eSStephen Rothwell #endif /* CONFIG_ALTIVEC */
296ef75e731SChristophe Leroy 	unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);
297ec67ad82SMichael Neuling 
298ec67ad82SMichael Neuling 	/*
299ec67ad82SMichael Neuling 	 * Clear the MSR VSX bit to indicate there is no valid state attached
300ec67ad82SMichael Neuling 	 * to this context, except in the specific case below where we set it.
301ec67ad82SMichael Neuling 	 */
302ec67ad82SMichael Neuling 	msr &= ~MSR_VSX;
3036a274c08SMichael Neuling #ifdef CONFIG_VSX
304ce48b210SMichael Neuling 	/*
305ce48b210SMichael Neuling 	 * Copy VSR 0-31 upper half from thread_struct to local
306ce48b210SMichael Neuling 	 * buffer, then write that to userspace.  Also set MSR_VSX in
307ce48b210SMichael Neuling 	 * the saved MSR value to indicate that frame->mc_vregs
308ce48b210SMichael Neuling 	 * contains valid data
309ce48b210SMichael Neuling 	 */
31016c29d18SMichael Neuling 	if (current->thread.used_vsr && ctx_has_vsx_region) {
311ef75e731SChristophe Leroy 		unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
312ce48b210SMichael Neuling 		msr |= MSR_VSX;
313ec67ad82SMichael Neuling 	}
314c6e6771bSMichael Neuling #endif /* CONFIG_VSX */
31581e7009eSStephen Rothwell #ifdef CONFIG_SPE
31681e7009eSStephen Rothwell 	/* save spe registers */
31781e7009eSStephen Rothwell 	if (current->thread.used_spe) {
318ef75e731SChristophe Leroy 		unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
319ef75e731SChristophe Leroy 				    ELF_NEVRREG * sizeof(u32), failed);
32081e7009eSStephen Rothwell 		/* set MSR_SPE in the saved MSR value to indicate that
32181e7009eSStephen Rothwell 		   frame->mc_vregs contains valid data */
3229e751186SMichael Neuling 		msr |= MSR_SPE;
32381e7009eSStephen Rothwell 	}
32481e7009eSStephen Rothwell 	/* else assert((regs->msr & MSR_SPE) == 0) */
32581e7009eSStephen Rothwell 
32681e7009eSStephen Rothwell 	/* We always copy to/from spefscr */
327ef75e731SChristophe Leroy 	unsafe_put_user(current->thread.spefscr,
328ef75e731SChristophe Leroy 			(u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
32981e7009eSStephen Rothwell #endif /* CONFIG_SPE */
33081e7009eSStephen Rothwell 
331ef75e731SChristophe Leroy 	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
332ef75e731SChristophe Leroy 
3331d25f11fSMichael Neuling 	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
3341d25f11fSMichael Neuling 	 * can check it on the restore to see if TM is active
3351d25f11fSMichael Neuling 	 */
336ef75e731SChristophe Leroy 	if (tm_frame)
337ef75e731SChristophe Leroy 		unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);
3381d25f11fSMichael Neuling 
33981e7009eSStephen Rothwell 	return 0;
340ef75e731SChristophe Leroy 
341ef75e731SChristophe Leroy failed:
342ef75e731SChristophe Leroy 	return 1;
34381e7009eSStephen Rothwell }
34481e7009eSStephen Rothwell 
/* Jump to 'label' if __unsafe_save_user_regs() faults. */
345ef75e731SChristophe Leroy #define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
346f918a81eSChristophe Leroy 	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx))	\
347ef75e731SChristophe Leroy 		goto label;						\
348ef75e731SChristophe Leroy } while (0)
349ef75e731SChristophe Leroy 
3502b0a576dSMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
3512b0a576dSMichael Neuling /*
3522b0a576dSMichael Neuling  * Save the current user registers on the user stack.
3532b0a576dSMichael Neuling  * We only save the altivec/spe registers if the process has used
3542b0a576dSMichael Neuling  * altivec/spe instructions at some point.
3552b0a576dSMichael Neuling  * We also save the transactional registers to a second ucontext in the
3562b0a576dSMichael Neuling  * frame.
3572b0a576dSMichael Neuling  *
358f918a81eSChristophe Leroy  * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
3592b0a576dSMichael Neuling  */
/*
 * Pre-save hook for the transactional path: warn if the kernel is running
 * with TM suspend disabled (tm_suspend_disabled), and snapshot the
 * checkpointed VRSAVE from the SPR into current->thread.ckvrsave.
 */
prepare_save_tm_user_regs(void)360968c4fccSChristophe Leroy static void prepare_save_tm_user_regs(void)
3612b0a576dSMichael Neuling {
36292fb8690SMichael Neuling 	WARN_ON(tm_suspend_disabled);
36392fb8690SMichael Neuling 
364968c4fccSChristophe Leroy 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
365968c4fccSChristophe Leroy 		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
366968c4fccSChristophe Leroy }
367968c4fccSChristophe Leroy 
/*
 * TM variant: write both the checkpointed ("ck") state into @frame and the
 * live transactional state into @tm_frame.  @msr is the full 64-bit MSR; its
 * top half is stashed in the transactional frame's PT_MSR word (see comment
 * below) so sigreturn can see that, and what kind of, transaction was active.
 * For each register class the transactional copy comes from the live state
 * only if the corresponding MSR bit is set, otherwise the checkpointed values
 * are written to both frames.  Caller must have opened user write access.
 * Returns 0 on success, 1 on user-access fault.
 */
368*a03b1a0bSChristophe Leroy static __always_inline int
save_tm_user_regs_unsafe(struct pt_regs * regs,struct mcontext __user * frame,struct mcontext __user * tm_frame,unsigned long msr)369*a03b1a0bSChristophe Leroy save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
370968c4fccSChristophe Leroy 			 struct mcontext __user *tm_frame, unsigned long msr)
371968c4fccSChristophe Leroy {
3722b0a576dSMichael Neuling 	/* Save both sets of general registers */
373ef75e731SChristophe Leroy 	unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
374ef75e731SChristophe Leroy 	unsafe_save_general_regs(regs, tm_frame, failed);
3752b0a576dSMichael Neuling 
3762b0a576dSMichael Neuling 	/* Stash the top half of the 64bit MSR into the 32bit MSR word
3772b0a576dSMichael Neuling 	 * of the transactional mcontext.  This way we have a backward-compatible
3782b0a576dSMichael Neuling 	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
3792b0a576dSMichael Neuling 	 * also look at what type of transaction (T or S) was active at the
3802b0a576dSMichael Neuling 	 * time of the signal.
3812b0a576dSMichael Neuling 	 */
382ef75e731SChristophe Leroy 	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
3832b0a576dSMichael Neuling 
3842b0a576dSMichael Neuling 	/* save altivec registers */
3852b0a576dSMichael Neuling 	if (current->thread.used_vr) {
386ef75e731SChristophe Leroy 		unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
387ef75e731SChristophe Leroy 				    ELF_NVRREG * sizeof(vector128), failed);
388ef75e731SChristophe Leroy 		if (msr & MSR_VEC)
389ef75e731SChristophe Leroy 			unsafe_copy_to_user(&tm_frame->mc_vregs,
390dc310669SCyril Bur 					    &current->thread.vr_state,
391ef75e731SChristophe Leroy 					    ELF_NVRREG * sizeof(vector128), failed);
392ef75e731SChristophe Leroy 		else
393ef75e731SChristophe Leroy 			unsafe_copy_to_user(&tm_frame->mc_vregs,
394000ec280SCyril Bur 					    &current->thread.ckvr_state,
395ef75e731SChristophe Leroy 					    ELF_NVRREG * sizeof(vector128), failed);
3962b0a576dSMichael Neuling 
3972b0a576dSMichael Neuling 		/* set MSR_VEC in the saved MSR value to indicate that
3982b0a576dSMichael Neuling 		 * frame->mc_vregs contains valid data
3992b0a576dSMichael Neuling 		 */
4002b0a576dSMichael Neuling 		msr |= MSR_VEC;
4012b0a576dSMichael Neuling 	}
4022b0a576dSMichael Neuling 
4032b0a576dSMichael Neuling 	/* We always copy to/from vrsave, it's 0 if we don't have or don't
4042b0a576dSMichael Neuling 	 * use altivec. Since VSCR only contains 32 bits saved in the least
4052b0a576dSMichael Neuling 	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
4062b0a576dSMichael Neuling 	 * most significant bits of that same vector. --BenH
4072b0a576dSMichael Neuling 	 */
408ef75e731SChristophe Leroy 	unsafe_put_user(current->thread.ckvrsave,
409ef75e731SChristophe Leroy 			(u32 __user *)&frame->mc_vregs[32], failed);
410ef75e731SChristophe Leroy 	if (msr & MSR_VEC)
411ef75e731SChristophe Leroy 		unsafe_put_user(current->thread.vrsave,
412ef75e731SChristophe Leroy 				(u32 __user *)&tm_frame->mc_vregs[32], failed);
413ef75e731SChristophe Leroy 	else
414ef75e731SChristophe Leroy 		unsafe_put_user(current->thread.ckvrsave,
415ef75e731SChristophe Leroy 				(u32 __user *)&tm_frame->mc_vregs[32], failed);
4162b0a576dSMichael Neuling 
417ef75e731SChristophe Leroy 	unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
418ef75e731SChristophe Leroy 	if (msr & MSR_FP)
419ef75e731SChristophe Leroy 		unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
420ef75e731SChristophe Leroy 	else
421ef75e731SChristophe Leroy 		unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
4222b0a576dSMichael Neuling 
4232b0a576dSMichael Neuling 	/*
4242b0a576dSMichael Neuling 	 * Copy VSR 0-31 upper half from thread_struct to local
4252b0a576dSMichael Neuling 	 * buffer, then write that to userspace.  Also set MSR_VSX in
4262b0a576dSMichael Neuling 	 * the saved MSR value to indicate that frame->mc_vregs
4272b0a576dSMichael Neuling 	 * contains valid data
4282b0a576dSMichael Neuling 	 */
4292b0a576dSMichael Neuling 	if (current->thread.used_vsr) {
430ef75e731SChristophe Leroy 		unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
431ef75e731SChristophe Leroy 		if (msr & MSR_VSX)
432ef75e731SChristophe Leroy 			unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
433ef75e731SChristophe Leroy 		else
434ef75e731SChristophe Leroy 			unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);
4352b0a576dSMichael Neuling 
4362b0a576dSMichael Neuling 		msr |= MSR_VSX;
4372b0a576dSMichael Neuling 	}
4382b0a576dSMichael Neuling 
439ef75e731SChristophe Leroy 	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
4402b0a576dSMichael Neuling 
4412b0a576dSMichael Neuling 	return 0;
442ef75e731SChristophe Leroy 
443ef75e731SChristophe Leroy failed:
444ef75e731SChristophe Leroy 	return 1;
4452b0a576dSMichael Neuling }
446f1cf4f93SChristophe Leroy #else
/* !CONFIG_PPC_TRANSACTIONAL_MEM: the TM save paths compile to no-ops. */
prepare_save_tm_user_regs(void)447968c4fccSChristophe Leroy static void prepare_save_tm_user_regs(void) { }
448968c4fccSChristophe Leroy 
449*a03b1a0bSChristophe Leroy static __always_inline int
save_tm_user_regs_unsafe(struct pt_regs * regs,struct mcontext __user * frame,struct mcontext __user * tm_frame,unsigned long msr)450*a03b1a0bSChristophe Leroy save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
451f1cf4f93SChristophe Leroy 			 struct mcontext __user *tm_frame, unsigned long msr)
452f1cf4f93SChristophe Leroy {
453f1cf4f93SChristophe Leroy 	return 0;
454f1cf4f93SChristophe Leroy }
4552b0a576dSMichael Neuling #endif
4562b0a576dSMichael Neuling 
/* Jump to 'label' if save_tm_user_regs_unsafe() faults. */
457ef75e731SChristophe Leroy #define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
458ef75e731SChristophe Leroy 	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr))	\
459ef75e731SChristophe Leroy 		goto label;						\
460ef75e731SChristophe Leroy } while (0)
461ef75e731SChristophe Leroy 
46281e7009eSStephen Rothwell /*
46381e7009eSStephen Rothwell  * Restore the current user register values from the user stack,
46481e7009eSStephen Rothwell  * (except for MSR).
46581e7009eSStephen Rothwell  */
/*
 * Restore the user registers from the mcontext @sr.  The MSR itself is not
 * restored wholesale: only selected bits are taken over (MSR_LE, and the
 * vector/FP/SPE availability bits are force-cleared so the next use reloads
 * from current->thread).  @sig is non-zero for a signal return, in which case
 * the saved little-endian mode is restored; otherwise r2 (TLS) is preserved
 * across the restore.  Opens and closes its own user read access window.
 * Returns 0 on success, 1 if the user frame could not be read.
 */
restore_user_regs(struct pt_regs * regs,struct mcontext __user * sr,int sig)46681e7009eSStephen Rothwell static long restore_user_regs(struct pt_regs *regs,
46781e7009eSStephen Rothwell 			      struct mcontext __user *sr, int sig)
46881e7009eSStephen Rothwell {
46981e7009eSStephen Rothwell 	unsigned int save_r2 = 0;
47081e7009eSStephen Rothwell 	unsigned long msr;
471c6e6771bSMichael Neuling #ifdef CONFIG_VSX
472c6e6771bSMichael Neuling 	int i;
473c6e6771bSMichael Neuling #endif
47481e7009eSStephen Rothwell 
475627b72beSChristophe Leroy 	if (!user_read_access_begin(sr, sizeof(*sr)))
476362471b3SChristophe Leroy 		return 1;
47781e7009eSStephen Rothwell 	/*
47881e7009eSStephen Rothwell 	 * restore general registers but not including MSR or SOFTE. Also
47981e7009eSStephen Rothwell 	 * take care of keeping r2 (TLS) intact if not a signal
48081e7009eSStephen Rothwell 	 */
48181e7009eSStephen Rothwell 	if (!sig)
48281e7009eSStephen Rothwell 		save_r2 = (unsigned int)regs->gpr[2];
483627b72beSChristophe Leroy 	unsafe_restore_general_regs(regs, sr, failed);
4844e0e45b0SNicholas Piggin 	set_trap_norestart(regs);
485627b72beSChristophe Leroy 	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
48681e7009eSStephen Rothwell 	if (!sig)
48781e7009eSStephen Rothwell 		regs->gpr[2] = (unsigned long) save_r2;
48881e7009eSStephen Rothwell 
489fab5db97SPaul Mackerras 	/* if doing signal return, restore the previous little-endian mode */
490fab5db97SPaul Mackerras 	if (sig)
49159dc5bfcSNicholas Piggin 		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
492fab5db97SPaul Mackerras 
49381e7009eSStephen Rothwell #ifdef CONFIG_ALTIVEC
494c6e6771bSMichael Neuling 	/*
495c6e6771bSMichael Neuling 	 * Force the process to reload the altivec registers from
496c6e6771bSMichael Neuling 	 * current->thread when it next does altivec instructions
497c6e6771bSMichael Neuling 	 */
49859dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
499fab5db97SPaul Mackerras 	if (msr & MSR_VEC) {
50081e7009eSStephen Rothwell 		/* restore altivec registers from the stack */
501627b72beSChristophe Leroy 		unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
502627b72beSChristophe Leroy 				      sizeof(sr->mc_vregs), failed);
503e1c0d66fSSimon Guo 		current->thread.used_vr = true;
50481e7009eSStephen Rothwell 	} else if (current->thread.used_vr)
505de79f7b9SPaul Mackerras 		memset(&current->thread.vr_state, 0,
506de79f7b9SPaul Mackerras 		       ELF_NVRREG * sizeof(vector128));
50781e7009eSStephen Rothwell 
50881e7009eSStephen Rothwell 	/* Always get VRSAVE back */
509627b72beSChristophe Leroy 	unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
510408a7e08SPaul Mackerras 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
511408a7e08SPaul Mackerras 		mtspr(SPRN_VRSAVE, current->thread.vrsave);
51281e7009eSStephen Rothwell #endif /* CONFIG_ALTIVEC */
513627b72beSChristophe Leroy 	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
51481e7009eSStephen Rothwell 
515c6e6771bSMichael Neuling #ifdef CONFIG_VSX
516ce48b210SMichael Neuling 	/*
517ce48b210SMichael Neuling 	 * Force the process to reload the VSX registers from
518ce48b210SMichael Neuling 	 * current->thread when it next does VSX instruction.
519ce48b210SMichael Neuling 	 */
52059dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
521ce48b210SMichael Neuling 	if (msr & MSR_VSX) {
522ce48b210SMichael Neuling 		/*
523ce48b210SMichael Neuling 		 * Restore altivec registers from the stack to a local
524ce48b210SMichael Neuling 		 * buffer, then write this out to the thread_struct
525ce48b210SMichael Neuling 		 */
526627b72beSChristophe Leroy 		unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
527e1c0d66fSSimon Guo 		current->thread.used_vsr = true;
528ce48b210SMichael Neuling 	} else if (current->thread.used_vsr)
529ce48b210SMichael Neuling 		for (i = 0; i < 32 ; i++)
530de79f7b9SPaul Mackerras 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
531c6e6771bSMichael Neuling #endif /* CONFIG_VSX */
532c6e6771bSMichael Neuling 	/*
533c6e6771bSMichael Neuling 	 * force the process to reload the FP registers from
534c6e6771bSMichael Neuling 	 * current->thread when it next does FP instructions
535c6e6771bSMichael Neuling 	 */
53659dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
537c6e6771bSMichael Neuling 
53881e7009eSStephen Rothwell #ifdef CONFIG_SPE
53962ea67e3SKees Cook 	/*
54062ea67e3SKees Cook 	 * Force the process to reload the spe registers from
54162ea67e3SKees Cook 	 * current->thread when it next does spe instructions.
54262ea67e3SKees Cook 	 * Since this is user ABI, we must enforce the sizing.
54362ea67e3SKees Cook 	 */
54462ea67e3SKees Cook 	BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
54559dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
546fab5db97SPaul Mackerras 	if (msr & MSR_SPE) {
54781e7009eSStephen Rothwell 		/* restore spe registers from the stack */
54862ea67e3SKees Cook 		unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
54962ea67e3SKees Cook 				      sizeof(current->thread.spe), failed);
550e1c0d66fSSimon Guo 		current->thread.used_spe = true;
55181e7009eSStephen Rothwell 	} else if (current->thread.used_spe)
55262ea67e3SKees Cook 		memset(&current->thread.spe, 0, sizeof(current->thread.spe));
55381e7009eSStephen Rothwell 
55481e7009eSStephen Rothwell 	/* Always get SPEFSCR back */
555627b72beSChristophe Leroy 	unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
55681e7009eSStephen Rothwell #endif /* CONFIG_SPE */
55781e7009eSStephen Rothwell 
558627b72beSChristophe Leroy 	user_read_access_end();
55981e7009eSStephen Rothwell 	return 0;
560627b72beSChristophe Leroy 
561627b72beSChristophe Leroy failed:
562627b72beSChristophe Leroy 	user_read_access_end();
563627b72beSChristophe Leroy 	return 1;
56481e7009eSStephen Rothwell }
56581e7009eSStephen Rothwell 
5662b0a576dSMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
5672b0a576dSMichael Neuling /*
5682b0a576dSMichael Neuling  * Restore the current user register values from the user stack, except for
5692b0a576dSMichael Neuling  * MSR, and recheckpoint the original checkpointed register state for processes
5702b0a576dSMichael Neuling  * in transactions.
5712b0a576dSMichael Neuling  */
/*
 * @regs:  live register state to restore the transactional context into
 * @sr:    user-space mcontext holding the checkpointed state (goes to the
 *         ck* fields of current->thread)
 * @tm_sr: user-space mcontext holding the transactional (speculative) state
 *
 * Returns 0 on success, 1 on failure (TM suspend disabled, a faulting
 * user access, or reserved MSR[TS] bits in the transactional context).
 */
restore_tm_user_regs(struct pt_regs * regs,struct mcontext __user * sr,struct mcontext __user * tm_sr)5722b0a576dSMichael Neuling static long restore_tm_user_regs(struct pt_regs *regs,
5732b0a576dSMichael Neuling 				 struct mcontext __user *sr,
5742b0a576dSMichael Neuling 				 struct mcontext __user *tm_sr)
5752b0a576dSMichael Neuling {
5762c27a18fSMichael Neuling 	unsigned long msr, msr_hi;
5772b0a576dSMichael Neuling 	int i;
5782b0a576dSMichael Neuling 
	/* Fail early if suspended-transaction support is disabled */
57992fb8690SMichael Neuling 	if (tm_suspend_disabled)
58092fb8690SMichael Neuling 		return 1;
5812b0a576dSMichael Neuling 	/*
5822b0a576dSMichael Neuling 	 * restore general registers but not including MSR or SOFTE. Also
5832b0a576dSMichael Neuling 	 * take care of keeping r2 (TLS) intact if not a signal.
5842b0a576dSMichael Neuling 	 * See comment in signal_64.c:restore_tm_sigcontexts();
5852b0a576dSMichael Neuling 	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
5862b0a576dSMichael Neuling 	 * were set by the signal delivery.
5872b0a576dSMichael Neuling 	 */
	/* First user-access window: read the checkpointed context @sr */
588627b72beSChristophe Leroy 	if (!user_read_access_begin(sr, sizeof(*sr)))
5892b0a576dSMichael Neuling 		return 1;
5902b0a576dSMichael Neuling 
591627b72beSChristophe Leroy 	unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
592627b72beSChristophe Leroy 	unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
	/* Low word of the user MSR; the TM bits come later from tm_sr */
593627b72beSChristophe Leroy 	unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
594627b72beSChristophe Leroy 
5952b0a576dSMichael Neuling 	/* Restore the previous little-endian mode */
59659dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
5972b0a576dSMichael Neuling 
	/* Force a VEC reload from current->thread on next AltiVec use */
59859dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
5992b0a576dSMichael Neuling 	if (msr & MSR_VEC) {
6002b0a576dSMichael Neuling 		/* restore altivec registers from the stack */
601627b72beSChristophe Leroy 		unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
602627b72beSChristophe Leroy 				      sizeof(sr->mc_vregs), failed);
603e1c0d66fSSimon Guo 		current->thread.used_vr = true;
6042b0a576dSMichael Neuling 	} else if (current->thread.used_vr) {
		/* Signal frame carried no VEC state: clear both copies */
605de79f7b9SPaul Mackerras 		memset(&current->thread.vr_state, 0,
606de79f7b9SPaul Mackerras 		       ELF_NVRREG * sizeof(vector128));
607000ec280SCyril Bur 		memset(&current->thread.ckvr_state, 0,
6082b0a576dSMichael Neuling 		       ELF_NVRREG * sizeof(vector128));
6092b0a576dSMichael Neuling 	}
6102b0a576dSMichael Neuling 
6112b0a576dSMichael Neuling 	/* Always get VRSAVE back */
612627b72beSChristophe Leroy 	unsafe_get_user(current->thread.ckvrsave,
613627b72beSChristophe Leroy 			(u32 __user *)&sr->mc_vregs[32], failed);
614408a7e08SPaul Mackerras 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
615000ec280SCyril Bur 		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
6162b0a576dSMichael Neuling 
	/* Force an FP reload from current->thread on next FP use */
61759dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
6182b0a576dSMichael Neuling 
619627b72beSChristophe Leroy 	unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
6202b0a576dSMichael Neuling 
	/* Force a VSX reload from current->thread on next VSX use */
62159dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
6222b0a576dSMichael Neuling 	if (msr & MSR_VSX) {
6232b0a576dSMichael Neuling 		/*
6242b0a576dSMichael Neuling 		 * Restore altivec registers from the stack to a local
6252b0a576dSMichael Neuling 		 * buffer, then write this out to the thread_struct
6262b0a576dSMichael Neuling 		 */
627627b72beSChristophe Leroy 		unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
628e1c0d66fSSimon Guo 		current->thread.used_vsr = true;
6292b0a576dSMichael Neuling 	} else if (current->thread.used_vsr)
		/* No VSX state in the frame: zero the VSR doubleword halves */
6302b0a576dSMichael Neuling 		for (i = 0; i < 32 ; i++) {
631de79f7b9SPaul Mackerras 			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
632000ec280SCyril Bur 			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
6332b0a576dSMichael Neuling 		}
6342b0a576dSMichael Neuling 
635627b72beSChristophe Leroy 	user_read_access_end();
636627b72beSChristophe Leroy 
	/* Second window: read the transactional context @tm_sr */
637627b72beSChristophe Leroy 	if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
638d2b9d2a5SMichael Neuling 		return 1;
639036fc2cbSChristophe Leroy 
640627b72beSChristophe Leroy 	unsafe_restore_general_regs(regs, tm_sr, failed);
641627b72beSChristophe Leroy 
642036fc2cbSChristophe Leroy 	/* restore altivec registers from the stack */
643036fc2cbSChristophe Leroy 	if (msr & MSR_VEC)
		/*
		 * NOTE(review): size is taken from sr->mc_vregs although the
		 * copy reads tm_sr->mc_vregs.  Both are struct mcontext
		 * members of the same type, so the size is identical, but
		 * sizeof(tm_sr->mc_vregs) would read more consistently.
		 */
644627b72beSChristophe Leroy 		unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
645627b72beSChristophe Leroy 				      sizeof(sr->mc_vregs), failed);
646036fc2cbSChristophe Leroy 
647036fc2cbSChristophe Leroy 	/* Always get VRSAVE back */
648627b72beSChristophe Leroy 	unsafe_get_user(current->thread.vrsave,
649627b72beSChristophe Leroy 			(u32 __user *)&tm_sr->mc_vregs[32], failed);
650036fc2cbSChristophe Leroy 
651627b72beSChristophe Leroy 	unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
652036fc2cbSChristophe Leroy 
653036fc2cbSChristophe Leroy 	if (msr & MSR_VSX) {
654036fc2cbSChristophe Leroy 		/*
655036fc2cbSChristophe Leroy 		 * Restore altivec registers from the stack to a local
656036fc2cbSChristophe Leroy 		 * buffer, then write this out to the thread_struct
657036fc2cbSChristophe Leroy 		 */
658627b72beSChristophe Leroy 		unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
659036fc2cbSChristophe Leroy 		current->thread.used_vsr = true;
660036fc2cbSChristophe Leroy 	}
661036fc2cbSChristophe Leroy 
662d2b9d2a5SMichael Neuling 	/* Get the top half of the MSR from the user context */
663627b72beSChristophe Leroy 	unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
664d2b9d2a5SMichael Neuling 	msr_hi <<= 32;
665627b72beSChristophe Leroy 
	/* All user accesses done before MSR[TS] is touched (see CAUTION) */
666627b72beSChristophe Leroy 	user_read_access_end();
667627b72beSChristophe Leroy 
668d2b9d2a5SMichael Neuling 	/* If TM bits are set to the reserved value, it's an invalid context */
669d2b9d2a5SMichael Neuling 	if (MSR_TM_RESV(msr_hi))
670d2b9d2a5SMichael Neuling 		return 1;
671e1c3743eSBreno Leitao 
672e1c3743eSBreno Leitao 	/*
673e1c3743eSBreno Leitao 	 * Disabling preemption, since it is unsafe to be preempted
674e1c3743eSBreno Leitao 	 * with MSR[TS] set without recheckpointing.
675e1c3743eSBreno Leitao 	 */
676e1c3743eSBreno Leitao 	preempt_disable();
677e1c3743eSBreno Leitao 
678e1c3743eSBreno Leitao 	/*
679e1c3743eSBreno Leitao 	 * CAUTION:
680e1c3743eSBreno Leitao 	 * After regs->MSR[TS] being updated, make sure that get_user(),
681e1c3743eSBreno Leitao 	 * put_user() or similar functions are *not* called. These
682e1c3743eSBreno Leitao 	 * functions can generate page faults which will cause the process
683e1c3743eSBreno Leitao 	 * to be de-scheduled with MSR[TS] set but without calling
684e1c3743eSBreno Leitao 	 * tm_recheckpoint(). This can cause a bug.
685e1c3743eSBreno Leitao 	 *
686e1c3743eSBreno Leitao 	 * Pull in the MSR TM bits from the user context
687e1c3743eSBreno Leitao 	 */
68859dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
6892b0a576dSMichael Neuling 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
6902b0a576dSMichael Neuling 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
6912b0a576dSMichael Neuling 	 * transactional versions should be loaded.
6922b0a576dSMichael Neuling 	 */
6932b0a576dSMichael Neuling 	tm_enable();
694e6b8fd02SMichael Neuling 	/* Make sure the transaction is marked as failed */
695e6b8fd02SMichael Neuling 	current->thread.tm_texasr |= TEXASR_FS;
6962b0a576dSMichael Neuling 	/* This loads the checkpointed FP/VEC state, if used */
697eb5c3f1cSCyril Bur 	tm_recheckpoint(&current->thread);
6982b0a576dSMichael Neuling 
6992b0a576dSMichael Neuling 	/* This loads the speculative FP/VEC state, if used */
700dc310669SCyril Bur 	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
7012b0a576dSMichael Neuling 	if (msr & MSR_FP) {
702dc310669SCyril Bur 		load_fp_state(&current->thread.fp_state);
70359dc5bfcSNicholas Piggin 		regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
7042b0a576dSMichael Neuling 	}
7052b0a576dSMichael Neuling 	if (msr & MSR_VEC) {
706dc310669SCyril Bur 		load_vr_state(&current->thread.vr_state);
70759dc5bfcSNicholas Piggin 		regs_set_return_msr(regs, regs->msr | MSR_VEC);
7082b0a576dSMichael Neuling 	}
7092b0a576dSMichael Neuling 
710e1c3743eSBreno Leitao 	preempt_enable();
711e1c3743eSBreno Leitao 
7122b0a576dSMichael Neuling 	return 0;
713627b72beSChristophe Leroy 
/* Common exit for any faulting unsafe_* access in either window */
714627b72beSChristophe Leroy failed:
715627b72beSChristophe Leroy 	user_read_access_end();
716627b72beSChristophe Leroy 	return 1;
7172b0a576dSMichael Neuling }
718ca9e1605SChristophe Leroy #else
/* Stub for kernels built without TM support: nothing to restore, succeed. */
restore_tm_user_regs(struct pt_regs * regs,struct mcontext __user * sr,struct mcontext __user * tm_sr)719ca9e1605SChristophe Leroy static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
720ca9e1605SChristophe Leroy 				 struct mcontext __user *tm_sr)
721ca9e1605SChristophe Leroy {
722ca9e1605SChristophe Leroy 	return 0;
7232b0a576dSMichael Neuling }
7242b0a576dSMichael Neuling #endif
7252b0a576dSMichael Neuling 
72681e7009eSStephen Rothwell #ifdef CONFIG_PPC64
72781e7009eSStephen Rothwell 
72881e7009eSStephen Rothwell #define copy_siginfo_to_user	copy_siginfo_to_user32
72981e7009eSStephen Rothwell 
73081e7009eSStephen Rothwell #endif /* CONFIG_PPC64 */
73181e7009eSStephen Rothwell 
73281e7009eSStephen Rothwell /*
73381e7009eSStephen Rothwell  * Set up a signal frame for a "real-time" signal handler
73481e7009eSStephen Rothwell  * (one which gets siginfo).
73581e7009eSStephen Rothwell  */
/*
 * Build the rt signal frame (siginfo + ucontext) on the user stack and
 * redirect @tsk's registers so it enters the handler on return to user mode.
 *
 * Returns 0 on success.  On any failure the frame address is logged via
 * signal_fault() and 1 is returned.
 */
handle_rt_signal32(struct ksignal * ksig,sigset_t * oldset,struct task_struct * tsk)736129b69dfSRichard Weinberger int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
737d1199431SCyril Bur 		       struct task_struct *tsk)
73881e7009eSStephen Rothwell {
7398e91cf85SChristophe Leroy 	struct rt_sigframe __user *frame;
7408e91cf85SChristophe Leroy 	struct mcontext __user *mctx;
7418e91cf85SChristophe Leroy 	struct mcontext __user *tm_mctx = NULL;
742a3f61dc0SBenjamin Herrenschmidt 	unsigned long newsp = 0;
7432b0a576dSMichael Neuling 	unsigned long tramp;
744d1199431SCyril Bur 	struct pt_regs *regs = tsk->thread.regs;
7452464cc4cSGustavo Luiz Duarte 	/* Save the thread's msr before get_tm_stackpointer() changes it */
7462464cc4cSGustavo Luiz Duarte 	unsigned long msr = regs->msr;
747d1199431SCyril Bur 
74881e7009eSStephen Rothwell 	/* Set up Signal Frame */
7498e91cf85SChristophe Leroy 	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
75091b8ecd4SChristophe Leroy 	mctx = &frame->uc.uc_mcontext;
75191b8ecd4SChristophe Leroy #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
75291b8ecd4SChristophe Leroy 	tm_mctx = &frame->uc_transact.uc_mcontext;
75391b8ecd4SChristophe Leroy #endif
	/* Pick the save routine matching the (pre-saved) transactional state */
754ef75e731SChristophe Leroy 	if (MSR_TM_ACTIVE(msr))
755ef75e731SChristophe Leroy 		prepare_save_tm_user_regs();
756ef75e731SChristophe Leroy 	else
757ef75e731SChristophe Leroy 		prepare_save_user_regs(1);
758ef75e731SChristophe Leroy 
	/* Single user-access window for all the unsafe_* stores below */
759acca5721SChristophe Leroy 	if (!user_access_begin(frame, sizeof(*frame)))
76081e7009eSStephen Rothwell 		goto badframe;
76181e7009eSStephen Rothwell 
76281e7009eSStephen Rothwell 	/* Put the siginfo & fill in most of the ucontext */
7639504db3eSChristophe Leroy 	unsafe_put_user(0, &frame->uc.uc_flags, failed);
7649504db3eSChristophe Leroy #ifdef CONFIG_PPC64
7659504db3eSChristophe Leroy 	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
7669504db3eSChristophe Leroy #else
7679504db3eSChristophe Leroy 	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
7689504db3eSChristophe Leroy #endif
7699504db3eSChristophe Leroy 	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);
7709504db3eSChristophe Leroy 
7719504db3eSChristophe Leroy 	if (MSR_TM_ACTIVE(msr)) {
7729504db3eSChristophe Leroy #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		/* Link the transactional ucontext after the checkpointed one */
7739504db3eSChristophe Leroy 		unsafe_put_user((unsigned long)&frame->uc_transact,
7749504db3eSChristophe Leroy 				&frame->uc.uc_link, failed);
7759504db3eSChristophe Leroy 		unsafe_put_user((unsigned long)tm_mctx,
7769504db3eSChristophe Leroy 				&frame->uc_transact.uc_regs, failed);
7779504db3eSChristophe Leroy #endif
778ef75e731SChristophe Leroy 		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
7799504db3eSChristophe Leroy 	} else {
7809504db3eSChristophe Leroy 		unsafe_put_user(0, &frame->uc.uc_link, failed);
781ef75e731SChristophe Leroy 		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
7829504db3eSChristophe Leroy 	}
78381e7009eSStephen Rothwell 
78481e7009eSStephen Rothwell 	/* Save user registers on the stack */
78591bf6955SChristophe Leroy 	if (tsk->mm->context.vdso) {
78691bf6955SChristophe Leroy 		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
787a7f290daSBenjamin Herrenschmidt 	} else {
		/*
		 * No VDSO: write a "li r0,__NR_rt_sigreturn; sc" trampoline
		 * into mc_pad and flush it to the icache (dcbst/icbi) so it
		 * is executable.
		 */
7888d33001dSChristophe Leroy 		tramp = (unsigned long)mctx->mc_pad;
7891c9debbcSChristophe Leroy 		unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
7901c9debbcSChristophe Leroy 		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
791acca5721SChristophe Leroy 		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
79281e7009eSStephen Rothwell 	}
793de781ebdSChristophe Leroy 	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
794de781ebdSChristophe Leroy 
795acca5721SChristophe Leroy 	user_access_end();
7969504db3eSChristophe Leroy 
	/* copy_siginfo_to_user does its own access checking */
7979504db3eSChristophe Leroy 	if (copy_siginfo_to_user(&frame->info, &ksig->info))
7989504db3eSChristophe Leroy 		goto badframe;
7999504db3eSChristophe Leroy 
	/* Handler returns through the trampoline via LR */
8002b0a576dSMichael Neuling 	regs->link = tramp;
8012b0a576dSMichael Neuling 
802b6254cedSChristophe Leroy #ifdef CONFIG_PPC_FPU_REGS
803d1199431SCyril Bur 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
804b6254cedSChristophe Leroy #endif
805cc657f53SPaul Mackerras 
806a3f61dc0SBenjamin Herrenschmidt 	/* create a stack frame for the caller of the handler */
8078e91cf85SChristophe Leroy 	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	/* Store the back chain (old r1) at the new stack pointer */
808e2b55306SPaul Mackerras 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
80981e7009eSStephen Rothwell 		goto badframe;
810a3f61dc0SBenjamin Herrenschmidt 
811a3f61dc0SBenjamin Herrenschmidt 	/* Fill registers for signal handler */
81281e7009eSStephen Rothwell 	regs->gpr[1] = newsp;
813129b69dfSRichard Weinberger 	regs->gpr[3] = ksig->sig;
8148e91cf85SChristophe Leroy 	regs->gpr[4] = (unsigned long)&frame->info;
8158e91cf85SChristophe Leroy 	regs->gpr[5] = (unsigned long)&frame->uc;
8168e91cf85SChristophe Leroy 	regs->gpr[6] = (unsigned long)frame;
81759dc5bfcSNicholas Piggin 	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
818e871c6bbSAnton Blanchard 	/* enter the signal handler in native-endian mode */
81959dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
82059dc5bfcSNicholas Piggin 
821129b69dfSRichard Weinberger 	return 0;
82281e7009eSStephen Rothwell 
/* A faulting unsafe_* store jumps here with the access window still open */
8239504db3eSChristophe Leroy failed:
824acca5721SChristophe Leroy 	user_access_end();
8259504db3eSChristophe Leroy 
82681e7009eSStephen Rothwell badframe:
8278e91cf85SChristophe Leroy 	signal_fault(tsk, regs, "handle_rt_signal32", frame);
828d0c3d534SOlof Johansson 
829129b69dfSRichard Weinberger 	return 1;
83081e7009eSStephen Rothwell }
83181e7009eSStephen Rothwell 
8323eea688bSChristophe Leroy /*
8333eea688bSChristophe Leroy  * OK, we're invoking a handler
8343eea688bSChristophe Leroy  */
/*
 * Build the non-rt (sigcontext-based) signal frame on the user stack and
 * redirect @tsk's registers into the handler — the legacy counterpart of
 * handle_rt_signal32() with no siginfo and a single mcontext.
 *
 * Returns 0 on success; logs via signal_fault() and returns 1 on failure.
 */
handle_signal32(struct ksignal * ksig,sigset_t * oldset,struct task_struct * tsk)8353eea688bSChristophe Leroy int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
8363eea688bSChristophe Leroy 		struct task_struct *tsk)
8373eea688bSChristophe Leroy {
8383eea688bSChristophe Leroy 	struct sigcontext __user *sc;
8393eea688bSChristophe Leroy 	struct sigframe __user *frame;
84091b8ecd4SChristophe Leroy 	struct mcontext __user *mctx;
8413eea688bSChristophe Leroy 	struct mcontext __user *tm_mctx = NULL;
8423eea688bSChristophe Leroy 	unsigned long newsp = 0;
8433eea688bSChristophe Leroy 	unsigned long tramp;
8443eea688bSChristophe Leroy 	struct pt_regs *regs = tsk->thread.regs;
8453eea688bSChristophe Leroy 	/* Save the thread's msr before get_tm_stackpointer() changes it */
8463eea688bSChristophe Leroy 	unsigned long msr = regs->msr;
8473eea688bSChristophe Leroy 
8483eea688bSChristophe Leroy 	/* Set up Signal Frame */
8493eea688bSChristophe Leroy 	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
85091b8ecd4SChristophe Leroy 	mctx = &frame->mctx;
85191b8ecd4SChristophe Leroy #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
85291b8ecd4SChristophe Leroy 	tm_mctx = &frame->mctx_transact;
85391b8ecd4SChristophe Leroy #endif
	/* Pick the save routine matching the (pre-saved) transactional state */
854ef75e731SChristophe Leroy 	if (MSR_TM_ACTIVE(msr))
855ef75e731SChristophe Leroy 		prepare_save_tm_user_regs();
856ef75e731SChristophe Leroy 	else
857ef75e731SChristophe Leroy 		prepare_save_user_regs(1);
858ef75e731SChristophe Leroy 
	/* Single user-access window for all the unsafe_* stores below */
859acca5721SChristophe Leroy 	if (!user_access_begin(frame, sizeof(*frame)))
8603eea688bSChristophe Leroy 		goto badframe;
8613eea688bSChristophe Leroy 	sc = (struct sigcontext __user *) &frame->sctx;
8623eea688bSChristophe Leroy 
8633eea688bSChristophe Leroy #if _NSIG != 64
8643eea688bSChristophe Leroy #error "Please adjust handle_signal()"
8653eea688bSChristophe Leroy #endif
866ad65f490SChristophe Leroy 	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
867ad65f490SChristophe Leroy 	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
8683eea688bSChristophe Leroy #ifdef CONFIG_PPC64
	/* Second word of the 64-bit mask goes into the spare slot */
869ad65f490SChristophe Leroy 	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
8703eea688bSChristophe Leroy #else
871ad65f490SChristophe Leroy 	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
8723eea688bSChristophe Leroy #endif
873ad65f490SChristophe Leroy 	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
874ad65f490SChristophe Leroy 	unsafe_put_user(ksig->sig, &sc->signal, failed);
8753eea688bSChristophe Leroy 
876ef75e731SChristophe Leroy 	if (MSR_TM_ACTIVE(msr))
877ef75e731SChristophe Leroy 		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
878ef75e731SChristophe Leroy 	else
879ef75e731SChristophe Leroy 		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
880ef75e731SChristophe Leroy 
88191bf6955SChristophe Leroy 	if (tsk->mm->context.vdso) {
88291bf6955SChristophe Leroy 		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
8833eea688bSChristophe Leroy 	} else {
		/*
		 * No VDSO: write a "li r0,__NR_sigreturn; sc" trampoline into
		 * mc_pad and flush it to the icache (dcbst/icbi).
		 */
8848d33001dSChristophe Leroy 		tramp = (unsigned long)mctx->mc_pad;
8851c9debbcSChristophe Leroy 		unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
8861c9debbcSChristophe Leroy 		unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
887acca5721SChristophe Leroy 		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
8883eea688bSChristophe Leroy 	}
889acca5721SChristophe Leroy 	user_access_end();
8903eea688bSChristophe Leroy 
	/* Handler returns through the trampoline via LR */
8913eea688bSChristophe Leroy 	regs->link = tramp;
8923eea688bSChristophe Leroy 
8933eea688bSChristophe Leroy #ifdef CONFIG_PPC_FPU_REGS
8943eea688bSChristophe Leroy 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
8953eea688bSChristophe Leroy #endif
8963eea688bSChristophe Leroy 
8973eea688bSChristophe Leroy 	/* create a stack frame for the caller of the handler */
8983eea688bSChristophe Leroy 	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	/* Store the back chain (old r1) at the new stack pointer */
8993eea688bSChristophe Leroy 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
9003eea688bSChristophe Leroy 		goto badframe;
9013eea688bSChristophe Leroy 
9023eea688bSChristophe Leroy 	regs->gpr[1] = newsp;
9033eea688bSChristophe Leroy 	regs->gpr[3] = ksig->sig;
9043eea688bSChristophe Leroy 	regs->gpr[4] = (unsigned long) sc;
90559dc5bfcSNicholas Piggin 	regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
906caccf2acSJoseph J Allen 	/* enter the signal handler in native-endian mode */
90759dc5bfcSNicholas Piggin 	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
90859dc5bfcSNicholas Piggin 
9093eea688bSChristophe Leroy 	return 0;
9103eea688bSChristophe Leroy 
/* A faulting unsafe_* store jumps here with the access window still open */
911ad65f490SChristophe Leroy failed:
912acca5721SChristophe Leroy 	user_access_end();
913ad65f490SChristophe Leroy 
9143eea688bSChristophe Leroy badframe:
9153eea688bSChristophe Leroy 	signal_fault(tsk, regs, "handle_signal32", frame);
9163eea688bSChristophe Leroy 
9173eea688bSChristophe Leroy 	return 1;
9183eea688bSChristophe Leroy }
9193eea688bSChristophe Leroy 
/*
 * Install the user context @ucp: read the signal mask and the mcontext
 * pointer from user memory, apply the mask, then restore the registers.
 *
 * @sig is forwarded to restore_user_regs().  Returns 0 on success,
 * -EFAULT on any faulting user access.
 */
do_setcontext(struct ucontext __user * ucp,struct pt_regs * regs,int sig)92081e7009eSStephen Rothwell static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
92181e7009eSStephen Rothwell {
92281e7009eSStephen Rothwell 	sigset_t set;
92381e7009eSStephen Rothwell 	struct mcontext __user *mcp;
92481e7009eSStephen Rothwell 
92552564262SChristophe Leroy 	if (!user_read_access_begin(ucp, sizeof(*ucp)))
92681e7009eSStephen Rothwell 		return -EFAULT;
927887f3cebSChristophe Leroy 
928887f3cebSChristophe Leroy 	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
92981e7009eSStephen Rothwell #ifdef CONFIG_PPC64
93081e7009eSStephen Rothwell 	{
		/* uc_regs is a 32-bit compat pointer; widen it to 64 bits */
93181e7009eSStephen Rothwell 		u32 cmcp;
93281e7009eSStephen Rothwell 
933887f3cebSChristophe Leroy 		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
93481e7009eSStephen Rothwell 		mcp = (struct mcontext __user *)(u64)cmcp;
93581e7009eSStephen Rothwell 	}
93681e7009eSStephen Rothwell #else
937887f3cebSChristophe Leroy 	unsafe_get_user(mcp, &ucp->uc_regs, failed);
93881e7009eSStephen Rothwell #endif
939887f3cebSChristophe Leroy 	user_read_access_end();
940887f3cebSChristophe Leroy 
94117440f17SAl Viro 	set_current_blocked(&set);
94281e7009eSStephen Rothwell 	if (restore_user_regs(regs, mcp, sig))
94381e7009eSStephen Rothwell 		return -EFAULT;
94481e7009eSStephen Rothwell 
94581e7009eSStephen Rothwell 	return 0;
946887f3cebSChristophe Leroy 
/* A faulting unsafe_* read jumps here with the access window still open */
947887f3cebSChristophe Leroy failed:
948887f3cebSChristophe Leroy 	user_read_access_end();
949887f3cebSChristophe Leroy 	return -EFAULT;
95081e7009eSStephen Rothwell }
95181e7009eSStephen Rothwell 
9522b0a576dSMichael Neuling #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * TM variant of do_setcontext(): reads the signal mask plus two compat
 * mcontext pointers (@ucp = checkpointed, @tm_ucp = transactional) and
 * hands both to restore_tm_user_regs().
 *
 * Returns 0 on success, -EFAULT on any faulting user access or if
 * restore_tm_user_regs() rejects the context.
 */
do_setcontext_tm(struct ucontext __user * ucp,struct ucontext __user * tm_ucp,struct pt_regs * regs)9532b0a576dSMichael Neuling static int do_setcontext_tm(struct ucontext __user *ucp,
9542b0a576dSMichael Neuling 			    struct ucontext __user *tm_ucp,
9552b0a576dSMichael Neuling 			    struct pt_regs *regs)
9562b0a576dSMichael Neuling {
9572b0a576dSMichael Neuling 	sigset_t set;
9582b0a576dSMichael Neuling 	struct mcontext __user *mcp;
9592b0a576dSMichael Neuling 	struct mcontext __user *tm_mcp;
9602b0a576dSMichael Neuling 	u32 cmcp;
9612b0a576dSMichael Neuling 	u32 tm_cmcp;
9622b0a576dSMichael Neuling 
96352564262SChristophe Leroy 	if (!user_read_access_begin(ucp, sizeof(*ucp)))
9642b0a576dSMichael Neuling 		return -EFAULT;
9652b0a576dSMichael Neuling 
966887f3cebSChristophe Leroy 	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
967887f3cebSChristophe Leroy 	unsafe_get_user(cmcp, &ucp->uc_regs, failed);
968887f3cebSChristophe Leroy 
969887f3cebSChristophe Leroy 	user_read_access_end();
970887f3cebSChristophe Leroy 
	/* tm_ucp is read outside the window; __get_user checks it itself */
971887f3cebSChristophe Leroy 	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
9722b0a576dSMichael Neuling 		return -EFAULT;
	/* Widen the 32-bit compat pointers */
9732b0a576dSMichael Neuling 	mcp = (struct mcontext __user *)(u64)cmcp;
9742b0a576dSMichael Neuling 	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
9752b0a576dSMichael Neuling 	/* no need to check access_ok(mcp), since mcp < 4GB */
9762b0a576dSMichael Neuling 
9772b0a576dSMichael Neuling 	set_current_blocked(&set);
9782b0a576dSMichael Neuling 	if (restore_tm_user_regs(regs, mcp, tm_mcp))
9792b0a576dSMichael Neuling 		return -EFAULT;
9802b0a576dSMichael Neuling 
9812b0a576dSMichael Neuling 	return 0;
982887f3cebSChristophe Leroy 
/* A faulting unsafe_* read jumps here with the access window still open */
983887f3cebSChristophe Leroy failed:
984887f3cebSChristophe Leroy 	user_read_access_end();
985887f3cebSChristophe Leroy 	return -EFAULT;
9862b0a576dSMichael Neuling }
9872b0a576dSMichael Neuling #endif
9882b0a576dSMichael Neuling 
989f3675644SAl Viro #ifdef CONFIG_PPC64
/*
 * swapcontext syscall: save the current context to @old_ctx (if non-NULL),
 * then switch to @new_ctx (if non-NULL).  On PPC64, @ctx_size distinguishes
 * contexts laid out with and without VSX state; a new context that sets
 * MSR_VSX but is too small to carry VSX state is rejected.
 *
 * Returns 0 on success, -EINVAL for a bad ctx_size, -EFAULT on faulting
 * user accesses.  A fault after the registers have been partially updated
 * by do_setcontext() kills the task (see comment below).
 */
COMPAT_SYSCALL_DEFINE3(swapcontext,struct ucontext __user *,old_ctx,struct ucontext __user *,new_ctx,int,ctx_size)990f3675644SAl Viro COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
991f3675644SAl Viro 		       struct ucontext __user *, new_ctx, int, ctx_size)
992f3675644SAl Viro #else
993f3675644SAl Viro SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
994f3675644SAl Viro 		       struct ucontext __user *, new_ctx, long, ctx_size)
995f3675644SAl Viro #endif
99681e7009eSStephen Rothwell {
997f3675644SAl Viro 	struct pt_regs *regs = current_pt_regs();
99816c29d18SMichael Neuling 	int ctx_has_vsx_region = 0;
99981e7009eSStephen Rothwell 
1000c1cb299eSMichael Neuling #ifdef CONFIG_PPC64
1001c1cb299eSMichael Neuling 	unsigned long new_msr = 0;
1002c1cb299eSMichael Neuling 
100377eb50aeSAndreas Schwab 	if (new_ctx) {
100477eb50aeSAndreas Schwab 		struct mcontext __user *mcp;
100577eb50aeSAndreas Schwab 		u32 cmcp;
100677eb50aeSAndreas Schwab 
100777eb50aeSAndreas Schwab 		/*
100877eb50aeSAndreas Schwab 		 * Get pointer to the real mcontext.  No need for
100977eb50aeSAndreas Schwab 		 * access_ok since we are dealing with compat
101077eb50aeSAndreas Schwab 		 * pointers.
101177eb50aeSAndreas Schwab 		 */
101277eb50aeSAndreas Schwab 		if (__get_user(cmcp, &new_ctx->uc_regs))
1013c1cb299eSMichael Neuling 			return -EFAULT;
101477eb50aeSAndreas Schwab 		mcp = (struct mcontext __user *)(u64)cmcp;
101577eb50aeSAndreas Schwab 		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
101677eb50aeSAndreas Schwab 			return -EFAULT;
101777eb50aeSAndreas Schwab 	}
1018c1cb299eSMichael Neuling 	/*
1019c1cb299eSMichael Neuling 	 * Check that the context is not smaller than the original
1020c1cb299eSMichael Neuling 	 * size (with VMX but without VSX)
1021c1cb299eSMichael Neuling 	 */
1022c1cb299eSMichael Neuling 	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1023c1cb299eSMichael Neuling 		return -EINVAL;
1024c1cb299eSMichael Neuling 	/*
1025c1cb299eSMichael Neuling 	 * If the new context state sets the MSR VSX bits but
1026c1cb299eSMichael Neuling 	 * it doesn't provide VSX state.
1027c1cb299eSMichael Neuling 	 */
1028c1cb299eSMichael Neuling 	if ((ctx_size < sizeof(struct ucontext)) &&
1029c1cb299eSMichael Neuling 	    (new_msr & MSR_VSX))
1030c1cb299eSMichael Neuling 		return -EINVAL;
103116c29d18SMichael Neuling 	/* Does the context have enough room to store VSX data? */
103216c29d18SMichael Neuling 	if (ctx_size >= sizeof(struct ucontext))
103316c29d18SMichael Neuling 		ctx_has_vsx_region = 1;
1034c1cb299eSMichael Neuling #else
103581e7009eSStephen Rothwell 	/* Context size is for future use. Right now, we only make sure
103681e7009eSStephen Rothwell 	 * we are passed something we understand
103781e7009eSStephen Rothwell 	 */
103881e7009eSStephen Rothwell 	if (ctx_size < sizeof(struct ucontext))
103981e7009eSStephen Rothwell 		return -EINVAL;
1040c1cb299eSMichael Neuling #endif
104181e7009eSStephen Rothwell 	if (old_ctx != NULL) {
10421c9bb1a0SPaul Mackerras 		struct mcontext __user *mctx;
10431c9bb1a0SPaul Mackerras 
10441c9bb1a0SPaul Mackerras 		/*
10451c9bb1a0SPaul Mackerras 		 * old_ctx might not be 16-byte aligned, in which
10461c9bb1a0SPaul Mackerras 		 * case old_ctx->uc_mcontext won't be either.
10471c9bb1a0SPaul Mackerras 		 * Because we have the old_ctx->uc_pad2 field
10481c9bb1a0SPaul Mackerras 		 * before old_ctx->uc_mcontext, we need to round down
10491c9bb1a0SPaul Mackerras 		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
10501c9bb1a0SPaul Mackerras 		 */
10511c9bb1a0SPaul Mackerras 		mctx = (struct mcontext __user *)
10521c9bb1a0SPaul Mackerras 			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1053968c4fccSChristophe Leroy 		prepare_save_user_regs(ctx_has_vsx_region);
		/* Write-only window: save regs, mask and mcontext pointer */
105431147d7dSChristophe Leroy 		if (!user_write_access_begin(old_ctx, ctx_size))
105531147d7dSChristophe Leroy 			return -EFAULT;
1056ef75e731SChristophe Leroy 		unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
105731147d7dSChristophe Leroy 		unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
105831147d7dSChristophe Leroy 		unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
105931147d7dSChristophe Leroy 		user_write_access_end();
106081e7009eSStephen Rothwell 	}
106181e7009eSStephen Rothwell 	if (new_ctx == NULL)
106281e7009eSStephen Rothwell 		return 0;
	/* Pre-fault the whole new context so a later fault is unlikely */
106396d4f267SLinus Torvalds 	if (!access_ok(new_ctx, ctx_size) ||
1064bb523b40SAndreas Gruenbacher 	    fault_in_readable((char __user *)new_ctx, ctx_size))
106581e7009eSStephen Rothwell 		return -EFAULT;
106681e7009eSStephen Rothwell 
106781e7009eSStephen Rothwell 	/*
106881e7009eSStephen Rothwell 	 * If we get a fault copying the context into the kernel's
106981e7009eSStephen Rothwell 	 * image of the user's registers, we can't just return -EFAULT
107081e7009eSStephen Rothwell 	 * because the user's registers will be corrupted.  For instance
107181e7009eSStephen Rothwell 	 * the NIP value may have been updated but not some of the
107281e7009eSStephen Rothwell 	 * other registers.  Given that we have done the access_ok
107381e7009eSStephen Rothwell 	 * and successfully read the first and last bytes of the region
107481e7009eSStephen Rothwell 	 * above, this should only happen in an out-of-memory situation
107581e7009eSStephen Rothwell 	 * or if another thread unmaps the region containing the context.
107681e7009eSStephen Rothwell 	 * We kill the task with a SIGSEGV in this situation.
107781e7009eSStephen Rothwell 	 */
107883a1f27aSEric W. Biederman 	if (do_setcontext(new_ctx, regs, 0)) {
1079fcb116bcSEric W. Biederman 		force_exit_sig(SIGSEGV);
108083a1f27aSEric W. Biederman 		return -EFAULT;
108183a1f27aSEric W. Biederman 	}
1082401d1f02SDavid Woodhouse 
	/* Make the syscall exit path restore the full register set */
1083401d1f02SDavid Woodhouse 	set_thread_flag(TIF_RESTOREALL);
108481e7009eSStephen Rothwell 	return 0;
108531147d7dSChristophe Leroy 
/* A faulting unsafe_* store jumps here with the write window still open */
108631147d7dSChristophe Leroy failed:
108731147d7dSChristophe Leroy 	user_write_access_end();
108831147d7dSChristophe Leroy 	return -EFAULT;
108981e7009eSStephen Rothwell }
109081e7009eSStephen Rothwell 
/*
 * Do an rt signal return: restore the ucontext that signal delivery
 * placed on the user stack (at sp + __SIGNAL_FRAMESIZE + 16), restore
 * the saved sigaltstack setting, and — on TM-capable hardware — possibly
 * recheckpoint a suspended transaction described by uc.uc_link.
 *
 * Returns 0 on success; on a bad frame the task is sent SIGSEGV and
 * 0 is still returned (the registers are already trashed, so there is
 * no meaningful error value to hand back).
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	/* Set when do_setcontext_tm() has restored/recheckpointed TM state */
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* The rt_sigframe sits just above the back-chain/LR save area */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, this includes any transactional state created
	 * within in. We only check for suspended as we can never be
	 * active in the kernel, we are active, there is nothing better to
	 * do than go ahead and Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	/*
	 * A non-NULL uc_link points at the ucontext holding the
	 * transactional register state saved at signal delivery.
	 */
	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoid
		 * hitting a TM Bad thing at RFID
		 */
		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
	}
	/* Fall through, for non-TM restore */
#endif
	/* Plain (non-transactional) restore of the saved user registers */
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	/* All GPRs were rewritten; make the exit path restore every one */
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}
119181e7009eSStephen Rothwell 
#ifdef CONFIG_PPC32
/*
 * Validate and apply a list of debug operations (single-stepping or
 * branch-tracing) and then install a complete new user context.
 *
 * All @ndbg ops from @dbg are validated into local copies of MSR/DBCR0
 * before anything is committed, so a bad op in the middle of the list
 * leaves the debug state untouched.  On a fault while installing @ctx
 * the task is killed with SIGSEGV (the register image may be partially
 * updated by then — see the comment below).
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	/* Staged copies: committed only after the whole op list validates */
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* BookE-style debug: step via DBCR0_IC under DBCR0_IDM */
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* Drop MSR_DE/IDM only if no other debug event remains */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			/* Classic PPC: single-step via MSR[SE] */
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* No MSR[BE] equivalent on advanced-debug CPUs */
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	/* Pre-fault the whole context in so do_setcontext() rarely faults */
	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_readable((char __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	/* All GPRs were rewritten; make the exit path restore every one */
	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
129081e7009eSStephen Rothwell 
/*
 * Do a (non-RT) signal return; undo the signal stack.
 *
 * The sigframe sits at sp + __SIGNAL_FRAMESIZE.  Restores the blocked
 * signal mask from the saved sigcontext, then the user registers —
 * either via the TM path (when the transactional mcontext's stashed MSR
 * has a transaction active) or via a plain restore_user_regs().
 * On a bad frame the task gets SIGSEGV; the return value is always 0.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;
	struct mcontext __user *mcp;
	/* tm_mcp/msr_hi stay NULL/0 without CONFIG_PPC_TRANSACTIONAL_MEM,
	 * so the MSR_TM_ACTIVE() test below then always takes the else arm. */
	struct mcontext __user *tm_mcp = NULL;
	unsigned long long msr_hi = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

	mcp = (struct mcontext __user *)&sf->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* The top 32 bits of the MSR are stashed in the transactional mcontext */
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
#endif
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		/* Reject a TM frame on hardware without TM support */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else {
		/* Non-TM restore: use the mcontext the frame points at */
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		if (restore_user_regs(regs, sr, 1)) {
			signal_fault(current, regs, "sys_sigreturn", sr);

			force_sig(SIGSEGV);
			return 0;
		}
	}

	/* All GPRs were rewritten; make the exit path restore every one */
	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "sys_sigreturn", sc);

	force_sig(SIGSEGV);
	return 0;
}
1360