xref: /linux/arch/arm64/kernel/signal32.c (revision f884ab15afdc5514e88105c92a4e2e1e6539869a)
1 /*
2  * Based on arch/arm/kernel/signal.c
3  *
4  * Copyright (C) 1995-2009 Russell King
5  * Copyright (C) 2012 ARM Ltd.
6  * Modified by Will Deacon <will.deacon@arm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include <linux/compat.h>
22 #include <linux/signal.h>
23 #include <linux/syscalls.h>
24 #include <linux/ratelimit.h>
25 
26 #include <asm/fpsimd.h>
27 #include <asm/signal32.h>
28 #include <asm/uaccess.h>
29 #include <asm/unistd32.h>
30 
31 struct compat_sigcontext {
32 	/* We always set these two fields to 0 */
33 	compat_ulong_t			trap_no;
34 	compat_ulong_t			error_code;
35 
36 	compat_ulong_t			oldmask;
37 	compat_ulong_t			arm_r0;
38 	compat_ulong_t			arm_r1;
39 	compat_ulong_t			arm_r2;
40 	compat_ulong_t			arm_r3;
41 	compat_ulong_t			arm_r4;
42 	compat_ulong_t			arm_r5;
43 	compat_ulong_t			arm_r6;
44 	compat_ulong_t			arm_r7;
45 	compat_ulong_t			arm_r8;
46 	compat_ulong_t			arm_r9;
47 	compat_ulong_t			arm_r10;
48 	compat_ulong_t			arm_fp;
49 	compat_ulong_t			arm_ip;
50 	compat_ulong_t			arm_sp;
51 	compat_ulong_t			arm_lr;
52 	compat_ulong_t			arm_pc;
53 	compat_ulong_t			arm_cpsr;
54 	compat_ulong_t			fault_address;
55 };
56 
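/*
 * Layout of the ucontext seen by 32-bit userspace.  It has to match what
 * an AArch32 kernel would provide; uc_regspace is the coprocessor save
 * area, which is where the VFP frame (struct compat_aux_sigframe below)
 * is placed.
 */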
57 struct compat_ucontext {
58 	compat_ulong_t			uc_flags;
59 	compat_uptr_t			uc_link;
60 	compat_stack_t			uc_stack;
61 	struct compat_sigcontext	uc_mcontext;
62 	compat_sigset_t			uc_sigmask;
63 	int		__unused[32 - (sizeof (compat_sigset_t) / sizeof (int))];
64 	compat_ulong_t	uc_regspace[128] __attribute__((__aligned__(8)));
65 };
66 
67 struct compat_vfp_sigframe {
68 	compat_ulong_t	magic;
69 	compat_ulong_t	size;
70 	struct compat_user_vfp {
71 		compat_u64	fpregs[32];
72 		compat_ulong_t	fpscr;
73 	} ufp;
74 	struct compat_user_vfp_exc {
75 		compat_ulong_t	fpexc;
76 		compat_ulong_t	fpinst;
77 		compat_ulong_t	fpinst2;
78 	} ufp_exc;
79 } __attribute__((__aligned__(8)));
80 
81 #define VFP_MAGIC		0x56465001
82 #define VFP_STORAGE_SIZE	sizeof(struct compat_vfp_sigframe)
83 
84 struct compat_aux_sigframe {
85 	struct compat_vfp_sigframe	vfp;
86 
87 	/* Something that isn't a valid magic number for any coprocessor.  */
88 	unsigned long			end_magic;
89 } __attribute__((__aligned__(8)));
90 
91 struct compat_sigframe {
92 	struct compat_ucontext	uc;
93 	compat_ulong_t		retcode[2];
94 };
95 
96 struct compat_rt_sigframe {
97 	struct compat_siginfo info;
98 	struct compat_sigframe sig;
99 };
100 
101 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
102 
103 /*
104  * For ARM syscalls, the syscall number has to be loaded into r7.
105  * We do not support an OABI userspace.
106  */
107 #define MOV_R7_NR_SIGRETURN	(0xe3a07000 | __NR_compat_sigreturn)
108 #define SVC_SYS_SIGRETURN	(0xef000000 | __NR_compat_sigreturn)
109 #define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | __NR_compat_rt_sigreturn)
110 #define SVC_SYS_RT_SIGRETURN	(0xef000000 | __NR_compat_rt_sigreturn)
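/*
 * 0xe3a07000 is the A32 encoding of "mov r7, #<imm>" and 0xef000000 the
 * encoding of "svc #<imm24>", so each pair above assembles to the
 * two-instruction trampoline "mov r7, #nr; svc #nr".
 */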
111 
112 /*
113  * For Thumb syscalls, we also pass the syscall number via r7. We therefore
114  * need two 16-bit instructions.
115  */
116 #define SVC_THUMB_SIGRETURN	(((0xdf00 | __NR_compat_sigreturn) << 16) | \
117 				   0x2700 | __NR_compat_sigreturn)
118 #define SVC_THUMB_RT_SIGRETURN	(((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
119 				   0x2700 | __NR_compat_rt_sigreturn)
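/*
 * Each of these packs two T16 instructions: 0x2700 is "movs r7, #<imm8>"
 * and 0xdf00 is "svc #<imm8>".  Stored as a little-endian word, the movs
 * halfword executes first, followed by the svc.
 */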
120 
121 const compat_ulong_t aarch32_sigret_code[6] = {
122 	/*
123 	 * AArch32 sigreturn code.
124 	 * We don't construct an OABI SWI - instead we just set the imm24 field
125 	 * to the EABI syscall number so that we create a sane disassembly.
126 	 */
127 	MOV_R7_NR_SIGRETURN,    SVC_SYS_SIGRETURN,    SVC_THUMB_SIGRETURN,
128 	MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
129 };
130 
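/*
 * A compat (AArch32) sigset_t is two 32-bit words, whereas the native
 * sigset_t is a single 64-bit word, so split and rejoin the mask when
 * crossing the boundary.
 */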
131 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
132 {
133 	compat_sigset_t	cset;
134 
135 	cset.sig[0] = set->sig[0] & 0xffffffffull;
136 	cset.sig[1] = set->sig[0] >> 32;
137 
138 	return copy_to_user(uset, &cset, sizeof(*uset));
139 }
140 
141 static inline int get_sigset_t(sigset_t *set,
142 			       const compat_sigset_t __user *uset)
143 {
144 	compat_sigset_t s32;
145 
146 	if (copy_from_user(&s32, uset, sizeof(*uset)))
147 		return -EFAULT;
148 
149 	set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
150 	return 0;
151 }
152 
153 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
154 {
155 	int err;
156 
157 	if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
158 		return -EFAULT;
159 
160 	/* If you change the siginfo_t structure, please be sure
161 	 * this code is updated accordingly.
162 	 * It should never copy any pad contained in the structure
163 	 * to avoid security leaks, but must copy the generic
164 	 * 3 ints plus the relevant union member.
165 	 * This routine must also convert siginfo from 64-bit to
166 	 * 32-bit at the same time.
167 	 */
168 	err = __put_user(from->si_signo, &to->si_signo);
169 	err |= __put_user(from->si_errno, &to->si_errno);
170 	err |= __put_user((short)from->si_code, &to->si_code);
171 	if (from->si_code < 0)
172 		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad,
173 				      SI_PAD_SIZE);
174 	else switch (from->si_code & __SI_MASK) {
175 	case __SI_KILL:
176 		err |= __put_user(from->si_pid, &to->si_pid);
177 		err |= __put_user(from->si_uid, &to->si_uid);
178 		break;
179 	case __SI_TIMER:
180 		err |= __put_user(from->si_tid, &to->si_tid);
181 		err |= __put_user(from->si_overrun, &to->si_overrun);
182 		err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
183 				  &to->si_ptr);
184 		break;
185 	case __SI_POLL:
186 		err |= __put_user(from->si_band, &to->si_band);
187 		err |= __put_user(from->si_fd, &to->si_fd);
188 		break;
189 	case __SI_FAULT:
190 		err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr,
191 				  &to->si_addr);
192 #ifdef BUS_MCEERR_AO
193 		/*
194 		 * Other callers might not initialize the si_lsb field,
195 		 * so check explicitly for the right codes here.
196 		 */
197 		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
198 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
199 #endif
200 		break;
201 	case __SI_CHLD:
202 		err |= __put_user(from->si_pid, &to->si_pid);
203 		err |= __put_user(from->si_uid, &to->si_uid);
204 		err |= __put_user(from->si_status, &to->si_status);
205 		err |= __put_user(from->si_utime, &to->si_utime);
206 		err |= __put_user(from->si_stime, &to->si_stime);
207 		break;
208 	case __SI_RT: /* This is not generated by the kernel as of now. */
209 	case __SI_MESGQ: /* But this is */
210 		err |= __put_user(from->si_pid, &to->si_pid);
211 		err |= __put_user(from->si_uid, &to->si_uid);
212 		err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
213 		break;
214 	default: /* this is just in case for now ... */
215 		err |= __put_user(from->si_pid, &to->si_pid);
216 		err |= __put_user(from->si_uid, &to->si_uid);
217 		break;
218 	}
219 	return err;
220 }
221 
222 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
223 {
224 	memset(to, 0, sizeof *to);
225 
226 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
227 	    copy_from_user(to->_sifields._pad,
228 			   from->_sifields._pad, SI_PAD_SIZE))
229 		return -EFAULT;
230 
231 	return 0;
232 }
233 
234 /*
235  * VFP save/restore code.
236  */
237 static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
238 {
239 	struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
240 	compat_ulong_t magic = VFP_MAGIC;
241 	compat_ulong_t size = VFP_STORAGE_SIZE;
242 	compat_ulong_t fpscr, fpexc;
243 	int err = 0;
244 
245 	/*
246 	 * Save the hardware registers to the fpsimd_state structure.
247 	 * Note that this also saves V16-31, which aren't visible
248 	 * in AArch32.
249 	 */
250 	fpsimd_save_state(fpsimd);
251 
252 	/* Place structure header on the stack */
253 	__put_user_error(magic, &frame->magic, err);
254 	__put_user_error(size, &frame->size, err);
255 
256 	/*
257 	 * Now copy the FP registers. Since the registers are packed,
258 	 * we can copy the prefix we want (V0-V15) as it is.
259 	 * FIXME: Won't work if big endian.
260 	 */
261 	err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
262 			      sizeof(frame->ufp.fpregs));
263 
264 	/* Create an AArch32 fpscr from the fpsr and the fpcr. */
265 	fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
266 		(fpsimd->fpcr & VFP_FPSCR_CTRL_MASK);
267 	__put_user_error(fpscr, &frame->ufp.fpscr, err);
268 
269 	/*
270 	 * The exception registers aren't available, so we fake up a
271 	 * basic FPEXC and zero everything else.
272 	 */
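	/* Bit 30 of FPEXC is the EN (enable) bit. */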
273 	fpexc = (1 << 30);
274 	__put_user_error(fpexc, &frame->ufp_exc.fpexc, err);
275 	__put_user_error(0, &frame->ufp_exc.fpinst, err);
276 	__put_user_error(0, &frame->ufp_exc.fpinst2, err);
277 
278 	return err ? -EFAULT : 0;
279 }
280 
281 static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
282 {
283 	struct fpsimd_state fpsimd;
284 	compat_ulong_t magic = VFP_MAGIC;
285 	compat_ulong_t size = VFP_STORAGE_SIZE;
286 	compat_ulong_t fpscr;
287 	int err = 0;
288 
289 	__get_user_error(magic, &frame->magic, err);
290 	__get_user_error(size, &frame->size, err);
291 
292 	if (err)
293 		return -EFAULT;
294 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
295 		return -EINVAL;
296 
297 	/*
298 	 * Copy the FP registers into the start of the fpsimd_state.
299 	 * FIXME: Won't work if big endian.
300 	 */
301 	err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
302 				sizeof(frame->ufp.fpregs));
303 
304 	/* Extract the fpsr and the fpcr from the fpscr */
305 	__get_user_error(fpscr, &frame->ufp.fpscr, err);
306 	fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK;
307 	fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
308 
309 	/*
310 	 * We don't need to touch the exception registers, so
311 	 * reload the hardware state.
312 	 */
313 	if (!err) {
314 		preempt_disable();
315 		fpsimd_load_state(&fpsimd);
316 		preempt_enable();
317 	}
318 
319 	return err ? -EFAULT : 0;
320 }
321 
322 static int compat_restore_sigframe(struct pt_regs *regs,
323 				   struct compat_sigframe __user *sf)
324 {
325 	int err;
326 	sigset_t set;
327 	struct compat_aux_sigframe __user *aux;
328 
329 	err = get_sigset_t(&set, &sf->uc.uc_sigmask);
330 	if (err == 0) {
331 		sigdelsetmask(&set, ~_BLOCKABLE);
332 		set_current_blocked(&set);
333 	}
334 
335 	__get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
336 	__get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
337 	__get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
338 	__get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
339 	__get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
340 	__get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
341 	__get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
342 	__get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
343 	__get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
344 	__get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
345 	__get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
346 	__get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
347 	__get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
348 	__get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
349 	__get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
350 	__get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
351 	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
352 
353 	/*
354 	 * Avoid compat_sys_sigreturn() restarting.
355 	 */
356 	regs->syscallno = ~0UL;
357 
358 	err |= !valid_user_regs(&regs->user_regs);
359 
360 	aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
361 	if (err == 0)
362 		err |= compat_restore_vfp_context(&aux->vfp);
363 
364 	return err;
365 }
366 
367 asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
368 {
369 	struct compat_sigframe __user *frame;
370 
371 	/* Always make any pending restarted system calls return -EINTR */
372 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
373 
374 	/*
375 	 * Since we stacked the signal on a 64-bit boundary,
376 	 * 'sp' should be 64-bit aligned here.  If it's not,
377 	 * then the user is trying to mess with us.
378 	 */
379 	if (regs->compat_sp & 7)
380 		goto badframe;
381 
382 	frame = (struct compat_sigframe __user *)regs->compat_sp;
383 
384 	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
385 		goto badframe;
386 
387 	if (compat_restore_sigframe(regs, frame))
388 		goto badframe;
389 
390 	return regs->regs[0];
391 
392 badframe:
393 	if (show_unhandled_signals)
394 		pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
395 				    current->comm, task_pid_nr(current), __func__,
396 				    regs->pc, regs->sp);
397 	force_sig(SIGSEGV, current);
398 	return 0;
399 }
400 
401 asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
402 {
403 	struct compat_rt_sigframe __user *frame;
404 
405 	/* Always make any pending restarted system calls return -EINTR */
406 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
407 
408 	/*
409 	 * Since we stacked the signal on a 64-bit boundary,
410 	 * 'sp' should be 64-bit aligned here.  If it's not,
411 	 * then the user is trying to mess with us.
412 	 */
413 	if (regs->compat_sp & 7)
414 		goto badframe;
415 
416 	frame = (struct compat_rt_sigframe __user *)regs->compat_sp;
417 
418 	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
419 		goto badframe;
420 
421 	if (compat_restore_sigframe(regs, &frame->sig))
422 		goto badframe;
423 
424 	if (compat_restore_altstack(&frame->sig.uc.uc_stack))
425 		goto badframe;
426 
427 	return regs->regs[0];
428 
429 badframe:
430 	if (show_unhandled_signals)
431 		pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
432 				    current->comm, task_pid_nr(current), __func__,
433 				    regs->pc, regs->sp);
434 	force_sig(SIGSEGV, current);
435 	return 0;
436 }
437 
438 static void __user *compat_get_sigframe(struct k_sigaction *ka,
439 					struct pt_regs *regs,
440 					int framesize)
441 {
442 	compat_ulong_t sp = regs->compat_sp;
443 	void __user *frame;
444 
445 	/*
446 	 * This is the X/Open sanctioned signal stack switching.
447 	 */
448 	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
449 		sp = current->sas_ss_sp + current->sas_ss_size;
450 
451 	/*
452 	 * ATPCS B01 mandates 8-byte alignment
453 	 */
454 	frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7));
455 
456 	/*
457 	 * Check that we can actually write to the signal frame.
458 	 */
459 	if (!access_ok(VERIFY_WRITE, frame, framesize))
460 		frame = NULL;
461 
462 	return frame;
463 }
464 
465 static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
466 				compat_ulong_t __user *rc, void __user *frame,
467 				int usig)
468 {
469 	compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
470 	compat_ulong_t retcode;
471 	compat_ulong_t spsr = regs->pstate & ~PSR_f;
472 	int thumb;
473 
474 	/* Check if the handler is written for ARM or Thumb */
475 	thumb = handler & 1;
476 
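	/*
	 * Enter the handler in the correct instruction set state: set or
	 * clear the T bit to match the handler and, for Thumb, clear any
	 * IT state left over from the interrupted context.
	 */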
477 	if (thumb) {
478 		spsr |= COMPAT_PSR_T_BIT;
479 		spsr &= ~COMPAT_PSR_IT_MASK;
480 	} else {
481 		spsr &= ~COMPAT_PSR_T_BIT;
482 	}
483 
484 	if (ka->sa.sa_flags & SA_RESTORER) {
485 		retcode = ptr_to_compat(ka->sa.sa_restorer);
486 	} else {
487 		/* Set up sigreturn pointer */
488 		unsigned int idx = thumb << 1;
489 
490 		if (ka->sa.sa_flags & SA_SIGINFO)
491 			idx += 3;
492 
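		/*
		 * idx selects an entry in aarch32_sigret_code[]: 0/2 for the
		 * ARM/Thumb sigreturn trampolines, 3/5 for the rt_sigreturn
		 * ones.  '(idx << 2)' converts it to a byte offset and
		 * '+ thumb' sets bit 0 so the return is made in Thumb state.
		 */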
493 		retcode = AARCH32_VECTORS_BASE +
494 			  AARCH32_KERN_SIGRET_CODE_OFFSET +
495 			  (idx << 2) + thumb;
496 	}
497 
498 	regs->regs[0]	= usig;
499 	regs->compat_sp	= ptr_to_compat(frame);
500 	regs->compat_lr	= retcode;
501 	regs->pc	= handler;
502 	regs->pstate	= spsr;
503 }
504 
505 static int compat_setup_sigframe(struct compat_sigframe __user *sf,
506 				 struct pt_regs *regs, sigset_t *set)
507 {
508 	struct compat_aux_sigframe __user *aux;
509 	int err = 0;
510 
511 	__put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
512 	__put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
513 	__put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
514 	__put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
515 	__put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
516 	__put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
517 	__put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
518 	__put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
519 	__put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
520 	__put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
521 	__put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
522 	__put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
523 	__put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
524 	__put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
525 	__put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
526 	__put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
527 	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
528 
529 	__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
530 	__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err);
531 	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
532 	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
533 
534 	err |= put_sigset_t(&sf->uc.uc_sigmask, set);
535 
536 	aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
537 
538 	if (err == 0)
539 		err |= compat_preserve_vfp_context(&aux->vfp);
540 	__put_user_error(0, &aux->end_magic, err);
541 
542 	return err;
543 }
544 
545 /*
546  * 32-bit signal handling routines called from signal.c
547  */
548 int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
549 			  sigset_t *set, struct pt_regs *regs)
550 {
551 	struct compat_rt_sigframe __user *frame;
552 	int err = 0;
553 
554 	frame = compat_get_sigframe(ka, regs, sizeof(*frame));
555 
556 	if (!frame)
557 		return 1;
558 
559 	err |= copy_siginfo_to_user32(&frame->info, info);
560 
561 	__put_user_error(0, &frame->sig.uc.uc_flags, err);
562 	__put_user_error(0, &frame->sig.uc.uc_link, err);
563 
564 	err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp);
565 
566 	err |= compat_setup_sigframe(&frame->sig, regs, set);
567 
568 	if (err == 0) {
569 		compat_setup_return(regs, ka, frame->sig.retcode, frame, usig);
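		/*
		 * r0 (the signal number) is set in compat_setup_return();
		 * r1 and r2 carry the siginfo and ucontext pointers expected
		 * by an SA_SIGINFO handler.
		 */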
570 		regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
571 		regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
572 	}
573 
574 	return err;
575 }
576 
577 int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
578 		       struct pt_regs *regs)
579 {
580 	struct compat_sigframe __user *frame;
581 	int err = 0;
582 
583 	frame = compat_get_sigframe(ka, regs, sizeof(*frame));
584 
585 	if (!frame)
586 		return 1;
587 
588 	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
589 
590 	err |= compat_setup_sigframe(frame, regs, set);
591 	if (err == 0)
592 		compat_setup_return(regs, ka, frame->retcode, frame, usig);
593 
594 	return err;
595 }
596 
597 void compat_setup_restart_syscall(struct pt_regs *regs)
598 {
599 	regs->regs[7] = __NR_compat_restart_syscall;
600 }
601