xref: /linux/arch/powerpc/kernel/signal_32.c (revision 092e0e7e520a1fca03e13c9f2d157432a8657ff2)
1 /*
2  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3  *
4  *  PowerPC version
5  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6  * Copyright (C) 2001 IBM
7  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9  *
10  *  Derived from "arch/i386/kernel/signal.c"
11  *    Copyright (C) 1991, 1992 Linus Torvalds
12  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
13  *
14  *  This program is free software; you can redistribute it and/or
15  *  modify it under the terms of the GNU General Public License
16  *  as published by the Free Software Foundation; either version
17  *  2 of the License, or (at your option) any later version.
18  */
19 
20 #include <linux/sched.h>
21 #include <linux/mm.h>
22 #include <linux/smp.h>
23 #include <linux/kernel.h>
24 #include <linux/signal.h>
25 #include <linux/errno.h>
26 #include <linux/elf.h>
27 #include <linux/ptrace.h>
28 #ifdef CONFIG_PPC64
29 #include <linux/syscalls.h>
30 #include <linux/compat.h>
31 #else
32 #include <linux/wait.h>
33 #include <linux/unistd.h>
34 #include <linux/stddef.h>
35 #include <linux/tty.h>
36 #include <linux/binfmts.h>
37 #include <linux/freezer.h>
38 #endif
39 
40 #include <asm/uaccess.h>
41 #include <asm/cacheflush.h>
42 #include <asm/syscalls.h>
43 #include <asm/sigcontext.h>
44 #include <asm/vdso.h>
45 #ifdef CONFIG_PPC64
46 #include "ppc32.h"
47 #include <asm/unistd.h>
48 #else
49 #include <asm/ucontext.h>
50 #include <asm/pgtable.h>
51 #endif
52 
53 #include "signal.h"
54 
55 #undef DEBUG_SIG
56 
57 #ifdef CONFIG_PPC64
58 #define sys_sigsuspend	compat_sys_sigsuspend
59 #define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
60 #define sys_rt_sigreturn	compat_sys_rt_sigreturn
61 #define sys_sigaction	compat_sys_sigaction
62 #define sys_swapcontext	compat_sys_swapcontext
63 #define sys_sigreturn	compat_sys_sigreturn
64 
65 #define old_sigaction	old_sigaction32
66 #define sigcontext	sigcontext32
67 #define mcontext	mcontext32
68 #define ucontext	ucontext32
69 
70 /*
71  * Userspace code may pass a ucontext which doesn't include VSX added
72  * at the end.  We need to check for this case.
73  */
74 #define UCONTEXTSIZEWITHOUTVSX \
75 		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
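/*
 * For example, sys_swapcontext() below only requires the caller-supplied
 * ctx_size to be at least UCONTEXTSIZEWITHOUTVSX, and treats the frame as
 * having a VSX region only when ctx_size >= sizeof(struct ucontext).
 */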
76 
77 /*
78  * Returning 0 means we return to userspace via
79  * ret_from_except and thus restore all user
80  * registers from *regs.  This is what we need
81  * to do when a signal has been delivered.
82  */
83 
84 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
85 #undef __SIGNAL_FRAMESIZE
86 #define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
87 #undef ELF_NVRREG
88 #define ELF_NVRREG	ELF_NVRREG32
89 
90 /*
91  * Functions for flipping sigsets (thanks to brain dead generic
92  * implementation that makes things simple for little endian only)
93  */
94 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
95 {
96 	compat_sigset_t	cset;
97 
98 	switch (_NSIG_WORDS) {
99 	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
100 		cset.sig[7] = set->sig[3] >> 32;
101 	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
102 		cset.sig[5] = set->sig[2] >> 32;
103 	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
104 		cset.sig[3] = set->sig[1] >> 32;
105 	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
106 		cset.sig[1] = set->sig[0] >> 32;
107 	}
108 	return copy_to_user(uset, &cset, sizeof(*uset));
109 }
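/*
 * Concretely, with _NSIG_WORDS == 2 each 64-bit kernel sigset word is
 * split into a pair of 32-bit compat words, low half first:
 *
 *	set->sig[0] == 0x1122334455667788ull
 *	  -> cset.sig[0] == 0x55667788, cset.sig[1] == 0x11223344
 *
 * get_sigset_t() below performs the inverse join.
 */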
110 
111 static inline int get_sigset_t(sigset_t *set,
112 			       const compat_sigset_t __user *uset)
113 {
114 	compat_sigset_t s32;
115 
116 	if (copy_from_user(&s32, uset, sizeof(*uset)))
117 		return -EFAULT;
118 
119 	/*
120 	 * Swap the 2 words of the 64-bit sigset_t (they are stored
121 	 * in the "wrong" endian in 32-bit user storage).
122 	 */
123 	switch (_NSIG_WORDS) {
124 	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
125 	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
126 	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
127 	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
128 	}
129 	return 0;
130 }
131 
132 static inline int get_old_sigaction(struct k_sigaction *new_ka,
133 		struct old_sigaction __user *act)
134 {
135 	compat_old_sigset_t mask;
136 	compat_uptr_t handler, restorer;
137 
138 	if (get_user(handler, &act->sa_handler) ||
139 	    __get_user(restorer, &act->sa_restorer) ||
140 	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
141 	    __get_user(mask, &act->sa_mask))
142 		return -EFAULT;
143 	new_ka->sa.sa_handler = compat_ptr(handler);
144 	new_ka->sa.sa_restorer = compat_ptr(restorer);
145 	siginitset(&new_ka->sa.sa_mask, mask);
146 	return 0;
147 }
148 
149 #define to_user_ptr(p)		ptr_to_compat(p)
150 #define from_user_ptr(p)	compat_ptr(p)
151 
152 static inline int save_general_regs(struct pt_regs *regs,
153 		struct mcontext __user *frame)
154 {
155 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
156 	int i;
157 
158 	WARN_ON(!FULL_REGS(regs));
159 
160 	for (i = 0; i <= PT_RESULT; i ++) {
161 		if (i == 14 && !FULL_REGS(regs))
162 			i = 32;
163 		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
164 			return -EFAULT;
165 	}
166 	return 0;
167 }
168 
169 static inline int restore_general_regs(struct pt_regs *regs,
170 		struct mcontext __user *sr)
171 {
172 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
173 	int i;
174 
175 	for (i = 0; i <= PT_RESULT; i++) {
176 		if ((i == PT_MSR) || (i == PT_SOFTE))
177 			continue;
178 		if (__get_user(gregs[i], &sr->mc_gregs[i]))
179 			return -EFAULT;
180 	}
181 	return 0;
182 }
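/*
 * PT_MSR and PT_SOFTE are deliberately skipped: the MSR is only partly
 * taken from the frame (restore_user_regs() merges just the MSR_LE bit),
 * and SOFTE is kernel-maintained soft-interrupt-enable state, so neither
 * is written directly from userspace here.
 */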
183 
184 #else /* CONFIG_PPC64 */
185 
186 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
187 
188 static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
189 {
190 	return copy_to_user(uset, set, sizeof(*uset));
191 }
192 
193 static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
194 {
195 	return copy_from_user(set, uset, sizeof(*uset));
196 }
197 
198 static inline int get_old_sigaction(struct k_sigaction *new_ka,
199 		struct old_sigaction __user *act)
200 {
201 	old_sigset_t mask;
202 
203 	if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
204 			__get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
205 			__get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
206 		return -EFAULT;
207 	__get_user(new_ka->sa.sa_flags, &act->sa_flags);
208 	__get_user(mask, &act->sa_mask);
209 	siginitset(&new_ka->sa.sa_mask, mask);
210 	return 0;
211 }
212 
213 #define to_user_ptr(p)		((unsigned long)(p))
214 #define from_user_ptr(p)	((void __user *)(p))
215 
216 static inline int save_general_regs(struct pt_regs *regs,
217 		struct mcontext __user *frame)
218 {
219 	WARN_ON(!FULL_REGS(regs));
220 	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
221 }
222 
223 static inline int restore_general_regs(struct pt_regs *regs,
224 		struct mcontext __user *sr)
225 {
226 	/* copy up to but not including MSR */
227 	if (__copy_from_user(regs, &sr->mc_gregs,
228 				PT_MSR * sizeof(elf_greg_t)))
229 		return -EFAULT;
230 	/* copy from orig_r3 (the word after the MSR) up to the end */
231 	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
232 				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
233 		return -EFAULT;
234 	return 0;
235 }
236 
237 #endif /* CONFIG_PPC64 */
238 
239 /*
240  * Atomically swap in the new signal mask, and wait for a signal.
241  */
242 long sys_sigsuspend(old_sigset_t mask)
243 {
244 	mask &= _BLOCKABLE;
245 	spin_lock_irq(&current->sighand->siglock);
246 	current->saved_sigmask = current->blocked;
247 	siginitset(&current->blocked, mask);
248 	recalc_sigpending();
249 	spin_unlock_irq(&current->sighand->siglock);
250 
251 	current->state = TASK_INTERRUPTIBLE;
252 	schedule();
253 	set_restore_sigmask();
254 	return -ERESTARTNOHAND;
255 }
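/*
 * The original blocked mask is parked in current->saved_sigmask and
 * set_restore_sigmask() flags it for restoration, so it is either recorded
 * as the handler's old mask or put back on the way out to userspace.
 */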
256 
257 long sys_sigaction(int sig, struct old_sigaction __user *act,
258 		struct old_sigaction __user *oact)
259 {
260 	struct k_sigaction new_ka, old_ka;
261 	int ret;
262 
263 #ifdef CONFIG_PPC64
264 	if (sig < 0)
265 		sig = -sig;
266 #endif
267 
268 	if (act) {
269 		if (get_old_sigaction(&new_ka, act))
270 			return -EFAULT;
271 	}
272 
273 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
274 	if (!ret && oact) {
275 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
276 		    __put_user(to_user_ptr(old_ka.sa.sa_handler),
277 			    &oact->sa_handler) ||
278 		    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
279 			    &oact->sa_restorer) ||
280 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
281 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
282 			return -EFAULT;
283 	}
284 
285 	return ret;
286 }
287 
288 /*
289  * When we have signals to deliver, we set up on the
290  * user stack, going down from the original stack pointer:
291  *	an ABI gap of 56 words
292  *	an mcontext struct
293  *	a sigcontext struct
294  *	a gap of __SIGNAL_FRAMESIZE bytes
295  *
296  * Each of these things must be a multiple of 16 bytes in size. The following
297  * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
298  *
299  */
300 struct sigframe {
301 	struct sigcontext sctx;		/* the sigcontext */
302 	struct mcontext	mctx;		/* all the register values */
303 	/*
304 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
305 	 * regs and 18 fp regs below sp before decrementing it.
306 	 */
307 	int			abigap[56];
308 };
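/*
 * Roughly, handle_signal32() ends up with the following on the user stack
 * (higher addresses first):
 *
 *	original r1
 *	abigap[56]		callee save area allowed by the ABI
 *	mctx			saved register state
 *	sctx			struct sigcontext
 *	__SIGNAL_FRAMESIZE gap	new r1 points here, back-chained to old r1
 */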
309 
310 /* We use the mc_pad field for the signal return trampoline. */
311 #define tramp	mc_pad
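/*
 * save_user_regs() fills this with a two-instruction trampoline:
 *
 *	0x38000000 + sigret	li	r0,sigret	(syscall number)
 *	0x44000002		sc			(back into the kernel)
 *
 * so if the vDSO trampoline is unavailable, returning from the handler via
 * the link register re-enters the kernel through sigreturn/rt_sigreturn.
 */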
312 
313 /*
314  *  When we have rt signals to deliver, we set up on the
315  *  user stack, going down from the original stack pointer:
316  *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
317  *	a gap of __SIGNAL_FRAMESIZE+16 bytes
318  *  (the +16 is to get the siginfo and ucontext in the same
319  *  positions as in older kernels).
320  *
321  *  Each of these things must be a multiple of 16 bytes in size.
322  *
323  */
324 struct rt_sigframe {
325 #ifdef CONFIG_PPC64
326 	compat_siginfo_t info;
327 #else
328 	struct siginfo info;
329 #endif
330 	struct ucontext	uc;
331 	/*
332 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
333 	 * regs and 18 fp regs below sp before decrementing it.
334 	 */
335 	int			abigap[56];
336 };
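/*
 * handle_rt_signal32() below hands this frame to the handler with the usual
 * 32-bit argument convention: r3 = signal number, r4 = &info, r5 = &uc
 * (r6 = the frame itself), with r1 lowered by a further
 * __SIGNAL_FRAMESIZE + 16 bytes below the frame.
 */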
337 
338 #ifdef CONFIG_VSX
339 unsigned long copy_fpr_to_user(void __user *to,
340 			       struct task_struct *task)
341 {
342 	double buf[ELF_NFPREG];
343 	int i;
344 
345 	/* copy FPRs from the thread_struct to a local buffer, then write that buffer out to userspace */
346 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
347 		buf[i] = task->thread.TS_FPR(i);
348 	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
349 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
350 }
351 
352 unsigned long copy_fpr_from_user(struct task_struct *task,
353 				 void __user *from)
354 {
355 	double buf[ELF_NFPREG];
356 	int i;
357 
358 	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
359 		return 1;
360 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
361 		task->thread.TS_FPR(i) = buf[i];
362 	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
363 
364 	return 0;
365 }
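/*
 * The user-visible FP regset is ELF_NFPREG doubles: 32 FPRs followed by the
 * FPSCR. With VSX each FPR occupies only half of a two-doubleword slot in
 * thread.fpr[][], so the helpers above repack the values element by element
 * via TS_FPR() rather than using the flat copies of the non-VSX variants
 * further below.
 */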
366 
367 unsigned long copy_vsx_to_user(void __user *to,
368 			       struct task_struct *task)
369 {
370 	double buf[ELF_NVSRHALFREG];
371 	int i;
372 
373 	/* copy the VSR 0-31 upper halves from the thread_struct to a local buffer, then write that buffer out to userspace */
374 	for (i = 0; i < ELF_NVSRHALFREG; i++)
375 		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
376 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
377 }
378 
379 unsigned long copy_vsx_from_user(struct task_struct *task,
380 				 void __user *from)
381 {
382 	double buf[ELF_NVSRHALFREG];
383 	int i;
384 
385 	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
386 		return 1;
387 	for (i = 0; i < ELF_NVSRHALFREG ; i++)
388 		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
389 	return 0;
390 }
391 #else
392 inline unsigned long copy_fpr_to_user(void __user *to,
393 				      struct task_struct *task)
394 {
395 	return __copy_to_user(to, task->thread.fpr,
396 			      ELF_NFPREG * sizeof(double));
397 }
398 
399 inline unsigned long copy_fpr_from_user(struct task_struct *task,
400 					void __user *from)
401 {
402 	return __copy_from_user(task->thread.fpr, from,
403 			      ELF_NFPREG * sizeof(double));
404 }
405 #endif
406 
407 /*
408  * Save the current user registers on the user stack.
409  * We only save the altivec/spe registers if the process has used
410  * altivec/spe instructions at some point.
411  */
412 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
413 		int sigret, int ctx_has_vsx_region)
414 {
415 	unsigned long msr = regs->msr;
416 
417 	/* Make sure floating point registers are stored in regs */
418 	flush_fp_to_thread(current);
419 
420 	/* save general registers */
421 	if (save_general_regs(regs, frame))
422 		return 1;
423 
424 #ifdef CONFIG_ALTIVEC
425 	/* save altivec registers */
426 	if (current->thread.used_vr) {
427 		flush_altivec_to_thread(current);
428 		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
429 				   ELF_NVRREG * sizeof(vector128)))
430 			return 1;
431 		/* set MSR_VEC in the saved MSR value to indicate that
432 		   frame->mc_vregs contains valid data */
433 		msr |= MSR_VEC;
434 	}
435 	/* else assert((regs->msr & MSR_VEC) == 0) */
436 
437 	/* We always copy to/from vrsave, it's 0 if we don't have or don't
438 	 * use altivec. Since VSCR only contains 32 bits saved in the least
439 	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
440 	 * most significant bits of that same vector. --BenH
441 	 */
442 	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
443 		return 1;
444 #endif /* CONFIG_ALTIVEC */
445 	if (copy_fpr_to_user(&frame->mc_fregs, current))
446 		return 1;
447 #ifdef CONFIG_VSX
448 	/*
449 	 * Copy VSR 0-31 upper half from thread_struct to local
450 	 * buffer, then write that to userspace.  Also set MSR_VSX in
451 	 * the saved MSR value to indicate that frame->mc_vregs
452 	 * contains valid data
453 	 */
454 	if (current->thread.used_vsr && ctx_has_vsx_region) {
455 		__giveup_vsx(current);
456 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
457 			return 1;
458 		msr |= MSR_VSX;
459 	}
460 #endif /* CONFIG_VSX */
461 #ifdef CONFIG_SPE
462 	/* save spe registers */
463 	if (current->thread.used_spe) {
464 		flush_spe_to_thread(current);
465 		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
466 				   ELF_NEVRREG * sizeof(u32)))
467 			return 1;
468 		/* set MSR_SPE in the saved MSR value to indicate that
469 		   frame->mc_vregs contains valid data */
470 		msr |= MSR_SPE;
471 	}
472 	/* else assert((regs->msr & MSR_SPE) == 0) */
473 
474 	/* We always copy to/from spefscr */
475 	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
476 		return 1;
477 #endif /* CONFIG_SPE */
478 
479 	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
480 		return 1;
481 	if (sigret) {
482 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
483 		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
484 		    || __put_user(0x44000002UL, &frame->tramp[1]))
485 			return 1;
486 		flush_icache_range((unsigned long) &frame->tramp[0],
487 				   (unsigned long) &frame->tramp[2]);
488 	}
489 
490 	return 0;
491 }
492 
493 /*
494  * Restore the current user register values from the user stack,
495  * (except for MSR).
496  */
497 static long restore_user_regs(struct pt_regs *regs,
498 			      struct mcontext __user *sr, int sig)
499 {
500 	long err;
501 	unsigned int save_r2 = 0;
502 	unsigned long msr;
503 #ifdef CONFIG_VSX
504 	int i;
505 #endif
506 
507 	/*
508 	 * restore general registers but not including MSR or SOFTE. Also
509 	 * take care of keeping r2 (TLS) intact if not a signal
510 	 */
511 	if (!sig)
512 		save_r2 = (unsigned int)regs->gpr[2];
513 	err = restore_general_regs(regs, sr);
514 	regs->trap = 0;
515 	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
516 	if (!sig)
517 		regs->gpr[2] = (unsigned long) save_r2;
518 	if (err)
519 		return 1;
520 
521 	/* if doing signal return, restore the previous little-endian mode */
522 	if (sig)
523 		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
524 
525 	/*
526 	 * Do this before updating the thread state in
527 	 * current->thread.fpr/vr/evr.  That way, if we get preempted
528 	 * and another task grabs the FPU/Altivec/SPE, it won't be
529 	 * tempted to save the current CPU state into the thread_struct
530 	 * and corrupt what we are writing there.
531 	 */
532 	discard_lazy_cpu_state();
533 
534 #ifdef CONFIG_ALTIVEC
535 	/*
536 	 * Force the process to reload the altivec registers from
537 	 * current->thread when it next does altivec instructions
538 	 */
539 	regs->msr &= ~MSR_VEC;
540 	if (msr & MSR_VEC) {
541 		/* restore altivec registers from the stack */
542 		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
543 				     sizeof(sr->mc_vregs)))
544 			return 1;
545 	} else if (current->thread.used_vr)
546 		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
547 
548 	/* Always get VRSAVE back */
549 	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
550 		return 1;
551 #endif /* CONFIG_ALTIVEC */
552 	if (copy_fpr_from_user(current, &sr->mc_fregs))
553 		return 1;
554 
555 #ifdef CONFIG_VSX
556 	/*
557 	 * Force the process to reload the VSX registers from
558 	 * current->thread when it next does VSX instruction.
559 	 */
560 	regs->msr &= ~MSR_VSX;
561 	if (msr & MSR_VSX) {
562 		/*
563 		 * Restore VSX registers from the stack to a local
564 		 * buffer, then write this out to the thread_struct
565 		 */
566 		if (copy_vsx_from_user(current, &sr->mc_vsregs))
567 			return 1;
568 	} else if (current->thread.used_vsr)
569 		for (i = 0; i < 32 ; i++)
570 			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
571 #endif /* CONFIG_VSX */
572 	/*
573 	 * force the process to reload the FP registers from
574 	 * current->thread when it next does FP instructions
575 	 */
576 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
577 
578 #ifdef CONFIG_SPE
579 	/* force the process to reload the spe registers from
580 	   current->thread when it next does spe instructions */
581 	regs->msr &= ~MSR_SPE;
582 	if (msr & MSR_SPE) {
583 		/* restore spe registers from the stack */
584 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
585 				     ELF_NEVRREG * sizeof(u32)))
586 			return 1;
587 	} else if (current->thread.used_spe)
588 		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
589 
590 	/* Always get SPEFSCR back */
591 	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
592 		return 1;
593 #endif /* CONFIG_SPE */
594 
595 	return 0;
596 }
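/*
 * Note the symmetry with save_user_regs(): the MSR_VEC/MSR_VSX/MSR_SPE bits
 * that were folded into mc_gregs[PT_MSR] at delivery time are what tell this
 * function which optional register regions of the frame contain valid data.
 */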
597 
598 #ifdef CONFIG_PPC64
599 long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
600 		struct sigaction32 __user *oact, size_t sigsetsize)
601 {
602 	struct k_sigaction new_ka, old_ka;
603 	int ret;
604 
605 	/* XXX: Don't preclude handling different sized sigset_t's.  */
606 	if (sigsetsize != sizeof(compat_sigset_t))
607 		return -EINVAL;
608 
609 	if (act) {
610 		compat_uptr_t handler;
611 
612 		ret = get_user(handler, &act->sa_handler);
613 		new_ka.sa.sa_handler = compat_ptr(handler);
614 		ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
615 		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
616 		if (ret)
617 			return -EFAULT;
618 	}
619 
620 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
621 	if (!ret && oact) {
622 		ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
623 		ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
624 		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
625 	}
626 	return ret;
627 }
628 
629 /*
630  * Note: it is necessary to treat how as an unsigned int, with the
631  * corresponding cast to a signed int to ensure that the proper
632  * conversion (sign extension) between the register representation
633  * of a signed int (msr in 32-bit mode) and the register representation
634  * of a signed int (msr in 64-bit mode) is performed.
635  */
636 long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
637 		compat_sigset_t __user *oset, size_t sigsetsize)
638 {
639 	sigset_t s;
640 	sigset_t __user *up;
641 	int ret;
642 	mm_segment_t old_fs = get_fs();
643 
644 	if (set) {
645 		if (get_sigset_t(&s, set))
646 			return -EFAULT;
647 	}
648 
649 	set_fs(KERNEL_DS);
650 	/* This is valid because of the set_fs() */
651 	up = (sigset_t __user *) &s;
652 	ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
653 				 sigsetsize);
654 	set_fs(old_fs);
655 	if (ret)
656 		return ret;
657 	if (oset) {
658 		if (put_sigset_t(oset, &s))
659 			return -EFAULT;
660 	}
661 	return 0;
662 }
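/*
 * The get_fs()/set_fs(KERNEL_DS) pattern above is what makes it legitimate to
 * pass a kernel-stack sigset_t through a __user-annotated pointer; the other
 * compat wrappers in this file rely on the same trick.
 */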
663 
664 long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
665 {
666 	sigset_t s;
667 	int ret;
668 	mm_segment_t old_fs = get_fs();
669 
670 	set_fs(KERNEL_DS);
671 	/* The __user pointer cast is valid because of the set_fs() */
672 	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
673 	set_fs(old_fs);
674 	if (!ret) {
675 		if (put_sigset_t(set, &s))
676 			return -EFAULT;
677 	}
678 	return ret;
679 }
680 
681 
682 int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
683 {
684 	int err;
685 
686 	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
687 		return -EFAULT;
688 
689 	/* If you change siginfo_t structure, please be sure
690 	 * this code is fixed accordingly.
691 	 * It should never copy any pad contained in the structure
692 	 * to avoid security leaks, but must copy the generic
693 	 * 3 ints plus the relevant union member.
694  * This routine must convert the siginfo from its 64-bit layout
695  * to the 32-bit layout at the same time.
696 	 */
697 	err = __put_user(s->si_signo, &d->si_signo);
698 	err |= __put_user(s->si_errno, &d->si_errno);
699 	err |= __put_user((short)s->si_code, &d->si_code);
700 	if (s->si_code < 0)
701 		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
702 				      SI_PAD_SIZE32);
703 	else switch(s->si_code >> 16) {
704 	case __SI_CHLD >> 16:
705 		err |= __put_user(s->si_pid, &d->si_pid);
706 		err |= __put_user(s->si_uid, &d->si_uid);
707 		err |= __put_user(s->si_utime, &d->si_utime);
708 		err |= __put_user(s->si_stime, &d->si_stime);
709 		err |= __put_user(s->si_status, &d->si_status);
710 		break;
711 	case __SI_FAULT >> 16:
712 		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
713 				  &d->si_addr);
714 		break;
715 	case __SI_POLL >> 16:
716 		err |= __put_user(s->si_band, &d->si_band);
717 		err |= __put_user(s->si_fd, &d->si_fd);
718 		break;
719 	case __SI_TIMER >> 16:
720 		err |= __put_user(s->si_tid, &d->si_tid);
721 		err |= __put_user(s->si_overrun, &d->si_overrun);
722 		err |= __put_user(s->si_int, &d->si_int);
723 		break;
724 	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
725 	case __SI_MESGQ >> 16:
726 		err |= __put_user(s->si_int, &d->si_int);
727 		/* fallthrough */
728 	case __SI_KILL >> 16:
729 	default:
730 		err |= __put_user(s->si_pid, &d->si_pid);
731 		err |= __put_user(s->si_uid, &d->si_uid);
732 		break;
733 	}
734 	return err;
735 }
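/*
 * The __SI_* tags used in the switch above live in the top 16 bits of the
 * kernel's si_code; the (short) cast when storing si_code strips them, so
 * userspace only ever sees the low 16 bits that form the real si_code value.
 */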
736 
737 #define copy_siginfo_to_user	copy_siginfo_to_user32
738 
739 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
740 {
741 	memset(to, 0, sizeof *to);
742 
743 	if (copy_from_user(to, from, 3*sizeof(int)) ||
744 	    copy_from_user(to->_sifields._pad,
745 			   from->_sifields._pad, SI_PAD_SIZE32))
746 		return -EFAULT;
747 
748 	return 0;
749 }
750 
751 /*
752  * Note: it is necessary to treat pid and sig as unsigned ints, with the
753  * corresponding cast to a signed int to ensure that the proper conversion
754  * (sign extension) between the register representation of a signed int
755  * (msr in 32-bit mode) and the register representation of a signed int
756  * (msr in 64-bit mode) is performed.
757  */
758 long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
759 {
760 	siginfo_t info;
761 	int ret;
762 	mm_segment_t old_fs = get_fs();
763 
764 	ret = copy_siginfo_from_user32(&info, uinfo);
765 	if (unlikely(ret))
766 		return ret;
767 
768 	set_fs (KERNEL_DS);
769 	/* The __user pointer cast is valid because of the set_fs() */
770 	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
771 	set_fs (old_fs);
772 	return ret;
773 }
774 /*
775  *  Start Alternate signal stack support
776  *
777  *  System Calls
778  *       sigaltstack              compat_sys_sigaltstack
779  */
780 
781 int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
782 		      int r6, int r7, int r8, struct pt_regs *regs)
783 {
784 	stack_32_t __user * newstack = compat_ptr(__new);
785 	stack_32_t __user * oldstack = compat_ptr(__old);
786 	stack_t uss, uoss;
787 	int ret;
788 	mm_segment_t old_fs;
789 	unsigned long sp;
790 	compat_uptr_t ss_sp;
791 
792 	/*
793 	 * set sp to the user stack on entry to the system call
794 	 * the system call router sets R9 to the saved registers
795 	 */
796 	sp = regs->gpr[1];
797 
798 	/* Put new stack info in local 64 bit stack struct */
799 	if (newstack) {
800 		if (get_user(ss_sp, &newstack->ss_sp) ||
801 		    __get_user(uss.ss_flags, &newstack->ss_flags) ||
802 		    __get_user(uss.ss_size, &newstack->ss_size))
803 			return -EFAULT;
804 		uss.ss_sp = compat_ptr(ss_sp);
805 	}
806 
807 	old_fs = get_fs();
808 	set_fs(KERNEL_DS);
809 	/* The __user pointer casts are valid because of the set_fs() */
810 	ret = do_sigaltstack(
811 		newstack ? (stack_t __user *) &uss : NULL,
812 		oldstack ? (stack_t __user *) &uoss : NULL,
813 		sp);
814 	set_fs(old_fs);
815 	/* Copy the stack information to the user output buffer */
816 	if (!ret && oldstack  &&
817 		(put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
818 		 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
819 		 __put_user(uoss.ss_size, &oldstack->ss_size)))
820 		return -EFAULT;
821 	return ret;
822 }
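/*
 * The user stack pointer fetched from regs->gpr[1] is passed through so that
 * do_sigaltstack() can tell whether the task is currently running on the
 * alternate stack and refuse changes while it is.
 */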
823 #endif /* CONFIG_PPC64 */
824 
825 /*
826  * Set up a signal frame for a "real-time" signal handler
827  * (one which gets siginfo).
828  */
829 int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
830 		siginfo_t *info, sigset_t *oldset,
831 		struct pt_regs *regs)
832 {
833 	struct rt_sigframe __user *rt_sf;
834 	struct mcontext __user *frame;
835 	void __user *addr;
836 	unsigned long newsp = 0;
837 
838 	/* Set up Signal Frame */
839 	/* Put a Real Time Context onto stack */
840 	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
841 	addr = rt_sf;
842 	if (unlikely(rt_sf == NULL))
843 		goto badframe;
844 
845 	/* Put the siginfo & fill in most of the ucontext */
846 	if (copy_siginfo_to_user(&rt_sf->info, info)
847 	    || __put_user(0, &rt_sf->uc.uc_flags)
848 	    || __put_user(0, &rt_sf->uc.uc_link)
849 	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
850 	    || __put_user(sas_ss_flags(regs->gpr[1]),
851 			  &rt_sf->uc.uc_stack.ss_flags)
852 	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
853 	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
854 		    &rt_sf->uc.uc_regs)
855 	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
856 		goto badframe;
857 
858 	/* Save user registers on the stack */
859 	frame = &rt_sf->uc.uc_mcontext;
860 	addr = frame;
861 	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
862 		if (save_user_regs(regs, frame, 0, 1))
863 			goto badframe;
864 		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
865 	} else {
866 		if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
867 			goto badframe;
868 		regs->link = (unsigned long) frame->tramp;
869 	}
870 
871 	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
872 
873 	/* create a stack frame for the caller of the handler */
874 	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
875 	addr = (void __user *)regs->gpr[1];
876 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
877 		goto badframe;
878 
879 	/* Fill registers for signal handler */
880 	regs->gpr[1] = newsp;
881 	regs->gpr[3] = sig;
882 	regs->gpr[4] = (unsigned long) &rt_sf->info;
883 	regs->gpr[5] = (unsigned long) &rt_sf->uc;
884 	regs->gpr[6] = (unsigned long) rt_sf;
885 	regs->nip = (unsigned long) ka->sa.sa_handler;
886 	/* enter the signal handler in big-endian mode */
887 	regs->msr &= ~MSR_LE;
888 	return 1;
889 
890 badframe:
891 #ifdef DEBUG_SIG
892 	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
893 	       regs, frame, newsp);
894 #endif
895 	if (show_unhandled_signals && printk_ratelimit())
896 		printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: "
897 			"%p nip %08lx lr %08lx\n",
898 			current->comm, current->pid,
899 			addr, regs->nip, regs->link);
900 
901 	force_sigsegv(sig, current);
902 	return 0;
903 }
904 
905 static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
906 {
907 	sigset_t set;
908 	struct mcontext __user *mcp;
909 
910 	if (get_sigset_t(&set, &ucp->uc_sigmask))
911 		return -EFAULT;
912 #ifdef CONFIG_PPC64
913 	{
914 		u32 cmcp;
915 
916 		if (__get_user(cmcp, &ucp->uc_regs))
917 			return -EFAULT;
918 		mcp = (struct mcontext __user *)(u64)cmcp;
919 		/* no need to check access_ok(mcp), since mcp < 4GB */
920 	}
921 #else
922 	if (__get_user(mcp, &ucp->uc_regs))
923 		return -EFAULT;
924 	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
925 		return -EFAULT;
926 #endif
927 	restore_sigmask(&set);
928 	if (restore_user_regs(regs, mcp, sig))
929 		return -EFAULT;
930 
931 	return 0;
932 }
933 
934 long sys_swapcontext(struct ucontext __user *old_ctx,
935 		     struct ucontext __user *new_ctx,
936 		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
937 {
938 	unsigned char tmp;
939 	int ctx_has_vsx_region = 0;
940 
941 #ifdef CONFIG_PPC64
942 	unsigned long new_msr = 0;
943 
944 	if (new_ctx) {
945 		struct mcontext __user *mcp;
946 		u32 cmcp;
947 
948 		/*
949 		 * Get pointer to the real mcontext.  No need for
950 		 * access_ok since we are dealing with compat
951 		 * pointers.
952 		 */
953 		if (__get_user(cmcp, &new_ctx->uc_regs))
954 			return -EFAULT;
955 		mcp = (struct mcontext __user *)(u64)cmcp;
956 		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
957 			return -EFAULT;
958 	}
959 	/*
960 	 * Check that the context is not smaller than the original
961 	 * size (with VMX but without VSX)
962 	 */
963 	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
964 		return -EINVAL;
965 	/*
966 	 * Reject the new context if its MSR sets the VSX bits but the
967 	 * context isn't large enough to hold the VSX state.
968 	 */
969 	if ((ctx_size < sizeof(struct ucontext)) &&
970 	    (new_msr & MSR_VSX))
971 		return -EINVAL;
972 	/* Does the context have enough room to store VSX data? */
973 	if (ctx_size >= sizeof(struct ucontext))
974 		ctx_has_vsx_region = 1;
975 #else
976 	/* Context size is for future use. Right now, we only make sure
977 	 * we are passed something we understand
978 	 */
979 	if (ctx_size < sizeof(struct ucontext))
980 		return -EINVAL;
981 #endif
982 	if (old_ctx != NULL) {
983 		struct mcontext __user *mctx;
984 
985 		/*
986 		 * old_ctx might not be 16-byte aligned, in which
987 		 * case old_ctx->uc_mcontext won't be either.
988 		 * Because we have the old_ctx->uc_pad2 field
989 		 * before old_ctx->uc_mcontext, we need to round down
990 		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
991 		 */
992 		mctx = (struct mcontext __user *)
993 			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
994 		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
995 		    || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
996 		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
997 		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
998 			return -EFAULT;
999 	}
1000 	if (new_ctx == NULL)
1001 		return 0;
1002 	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
1003 	    || __get_user(tmp, (u8 __user *) new_ctx)
1004 	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
1005 		return -EFAULT;
1006 
1007 	/*
1008 	 * If we get a fault copying the context into the kernel's
1009 	 * image of the user's registers, we can't just return -EFAULT
1010 	 * because the user's registers will be corrupted.  For instance
1011 	 * the NIP value may have been updated but not some of the
1012 	 * other registers.  Given that we have done the access_ok
1013 	 * and successfully read the first and last bytes of the region
1014 	 * above, this should only happen in an out-of-memory situation
1015 	 * or if another thread unmaps the region containing the context.
1016 	 * We kill the task with a SIGSEGV in this situation.
1017 	 */
1018 	if (do_setcontext(new_ctx, regs, 0))
1019 		do_exit(SIGSEGV);
1020 
1021 	set_thread_flag(TIF_RESTOREALL);
1022 	return 0;
1023 }
1024 
1025 long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1026 		     struct pt_regs *regs)
1027 {
1028 	struct rt_sigframe __user *rt_sf;
1029 
1030 	/* Always make any pending restarted system calls return -EINTR */
1031 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
1032 
1033 	rt_sf = (struct rt_sigframe __user *)
1034 		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1035 	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1036 		goto bad;
1037 	if (do_setcontext(&rt_sf->uc, regs, 1))
1038 		goto bad;
1039 
1040 	/*
1041 	 * It's not clear whether or why it is desirable to save the
1042 	 * sigaltstack setting on signal delivery and restore it on
1043 	 * signal return.  But other architectures do this and we have
1044 	 * always done it up until now so it is probably better not to
1045 	 * change it.  -- paulus
1046 	 */
1047 #ifdef CONFIG_PPC64
1048 	/*
1049 	 * We use the compat_sys_ version that does the 32/64 bits conversion
1050 	 * and takes the userland pointer directly. What about error checking?
1051 	 * Nobody does any...
1052 	 */
1053 	compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
1054 #else
1055 	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
1056 #endif
1057 	set_thread_flag(TIF_RESTOREALL);
1058 	return 0;
1059 
1060  bad:
1061 	if (show_unhandled_signals && printk_ratelimit())
1062 		printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: "
1063 			"%p nip %08lx lr %08lx\n",
1064 			current->comm, current->pid,
1065 			rt_sf, regs->nip, regs->link);
1066 
1067 	force_sig(SIGSEGV, current);
1068 	return 0;
1069 }
1070 
1071 #ifdef CONFIG_PPC32
1072 int sys_debug_setcontext(struct ucontext __user *ctx,
1073 			 int ndbg, struct sig_dbg_op __user *dbg,
1074 			 int r6, int r7, int r8,
1075 			 struct pt_regs *regs)
1076 {
1077 	struct sig_dbg_op op;
1078 	int i;
1079 	unsigned char tmp;
1080 	unsigned long new_msr = regs->msr;
1081 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1082 	unsigned long new_dbcr0 = current->thread.dbcr0;
1083 #endif
1084 
1085 	for (i=0; i<ndbg; i++) {
1086 		if (copy_from_user(&op, dbg + i, sizeof(op)))
1087 			return -EFAULT;
1088 		switch (op.dbg_type) {
1089 		case SIG_DBG_SINGLE_STEPPING:
1090 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1091 			if (op.dbg_value) {
1092 				new_msr |= MSR_DE;
1093 				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1094 			} else {
1095 				new_dbcr0 &= ~DBCR0_IC;
1096 				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1097 						current->thread.dbcr1)) {
1098 					new_msr &= ~MSR_DE;
1099 					new_dbcr0 &= ~DBCR0_IDM;
1100 				}
1101 			}
1102 #else
1103 			if (op.dbg_value)
1104 				new_msr |= MSR_SE;
1105 			else
1106 				new_msr &= ~MSR_SE;
1107 #endif
1108 			break;
1109 		case SIG_DBG_BRANCH_TRACING:
1110 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1111 			return -EINVAL;
1112 #else
1113 			if (op.dbg_value)
1114 				new_msr |= MSR_BE;
1115 			else
1116 				new_msr &= ~MSR_BE;
1117 #endif
1118 			break;
1119 
1120 		default:
1121 			return -EINVAL;
1122 		}
1123 	}
1124 
1125 	/* We wait until here to actually install the values in the
1126 	   registers so if we fail in the above loop, it will not
1127 	   affect the contents of these registers.  After this point,
1128 	   failure is a problem, anyway, and it's very unlikely unless
1129 	   the user is really doing something wrong. */
1130 	regs->msr = new_msr;
1131 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1132 	current->thread.dbcr0 = new_dbcr0;
1133 #endif
1134 
1135 	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1136 	    || __get_user(tmp, (u8 __user *) ctx)
1137 	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1138 		return -EFAULT;
1139 
1140 	/*
1141 	 * If we get a fault copying the context into the kernel's
1142 	 * image of the user's registers, we can't just return -EFAULT
1143 	 * because the user's registers will be corrupted.  For instance
1144 	 * the NIP value may have been updated but not some of the
1145 	 * other registers.  Given that we have done the access_ok
1146 	 * and successfully read the first and last bytes of the region
1147 	 * above, this should only happen in an out-of-memory situation
1148 	 * or if another thread unmaps the region containing the context.
1149 	 * We kill the task with a SIGSEGV in this situation.
1150 	 */
1151 	if (do_setcontext(ctx, regs, 1)) {
1152 		if (show_unhandled_signals && printk_ratelimit())
1153 			printk(KERN_INFO "%s[%d]: bad frame in "
1154 				"sys_debug_setcontext: %p nip %08lx "
1155 				"lr %08lx\n",
1156 				current->comm, current->pid,
1157 				ctx, regs->nip, regs->link);
1158 
1159 		force_sig(SIGSEGV, current);
1160 		goto out;
1161 	}
1162 
1163 	/*
1164 	 * It's not clear whether or why it is desirable to save the
1165 	 * sigaltstack setting on signal delivery and restore it on
1166 	 * signal return.  But other architectures do this and we have
1167 	 * always done it up until now so it is probably better not to
1168 	 * change it.  -- paulus
1169 	 */
1170 	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1171 
1172 	set_thread_flag(TIF_RESTOREALL);
1173  out:
1174 	return 0;
1175 }
1176 #endif
1177 
1178 /*
1179  * OK, we're invoking a handler
1180  */
1181 int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1182 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
1183 {
1184 	struct sigcontext __user *sc;
1185 	struct sigframe __user *frame;
1186 	unsigned long newsp = 0;
1187 
1188 	/* Set up Signal Frame */
1189 	frame = get_sigframe(ka, regs, sizeof(*frame), 1);
1190 	if (unlikely(frame == NULL))
1191 		goto badframe;
1192 	sc = (struct sigcontext __user *) &frame->sctx;
1193 
1194 #if _NSIG != 64
1195 #error "Please adjust handle_signal()"
1196 #endif
1197 	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
1198 	    || __put_user(oldset->sig[0], &sc->oldmask)
1199 #ifdef CONFIG_PPC64
1200 	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1201 #else
1202 	    || __put_user(oldset->sig[1], &sc->_unused[3])
1203 #endif
1204 	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1205 	    || __put_user(sig, &sc->signal))
1206 		goto badframe;
1207 
1208 	if (vdso32_sigtramp && current->mm->context.vdso_base) {
1209 		if (save_user_regs(regs, &frame->mctx, 0, 1))
1210 			goto badframe;
1211 		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
1212 	} else {
1213 		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
1214 			goto badframe;
1215 		regs->link = (unsigned long) frame->mctx.tramp;
1216 	}
1217 
1218 	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
1219 
1220 	/* create a stack frame for the caller of the handler */
1221 	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1222 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
1223 		goto badframe;
1224 
1225 	regs->gpr[1] = newsp;
1226 	regs->gpr[3] = sig;
1227 	regs->gpr[4] = (unsigned long) sc;
1228 	regs->nip = (unsigned long) ka->sa.sa_handler;
1229 	/* enter the signal handler in big-endian mode */
1230 	regs->msr &= ~MSR_LE;
1231 
1232 	return 1;
1233 
1234 badframe:
1235 #ifdef DEBUG_SIG
1236 	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1237 	       regs, frame, newsp);
1238 #endif
1239 	if (show_unhandled_signals && printk_ratelimit())
1240 		printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: "
1241 			"%p nip %08lx lr %08lx\n",
1242 			current->comm, current->pid,
1243 			frame, regs->nip, regs->link);
1244 
1245 	force_sigsegv(sig, current);
1246 	return 0;
1247 }
1248 
1249 /*
1250  * Do a signal return; undo the signal stack.
1251  */
1252 long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1253 		       struct pt_regs *regs)
1254 {
1255 	struct sigcontext __user *sc;
1256 	struct sigcontext sigctx;
1257 	struct mcontext __user *sr;
1258 	void __user *addr;
1259 	sigset_t set;
1260 
1261 	/* Always make any pending restarted system calls return -EINTR */
1262 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
1263 
1264 	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1265 	addr = sc;
1266 	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1267 		goto badframe;
1268 
1269 #ifdef CONFIG_PPC64
1270 	/*
1271 	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1272 	 * unused part of the signal stackframe
1273 	 */
1274 	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1275 #else
1276 	set.sig[0] = sigctx.oldmask;
1277 	set.sig[1] = sigctx._unused[3];
1278 #endif
1279 	restore_sigmask(&set);
1280 
1281 	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1282 	addr = sr;
1283 	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1284 	    || restore_user_regs(regs, sr, 1))
1285 		goto badframe;
1286 
1287 	set_thread_flag(TIF_RESTOREALL);
1288 	return 0;
1289 
1290 badframe:
1291 	if (show_unhandled_signals && printk_ratelimit())
1292 		printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: "
1293 			"%p nip %08lx lr %08lx\n",
1294 			current->comm, current->pid,
1295 			addr, regs->nip, regs->link);
1296 
1297 	force_sig(SIGSEGV, current);
1298 	return 0;
1299 }
1300