xref: /linux/arch/arm/kernel/signal.c (revision a8b3e6f10f08f66ae1072efd087b30966a3654f6)
1 /*
2  *  linux/arch/arm/kernel/signal.c
3  *
4  *  Copyright (C) 1995-2002 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/config.h>
11 #include <linux/errno.h>
12 #include <linux/signal.h>
13 #include <linux/ptrace.h>
14 #include <linux/personality.h>
15 
16 #include <asm/cacheflush.h>
17 #include <asm/ucontext.h>
18 #include <asm/uaccess.h>
19 #include <asm/unistd.h>
20 
21 #include "ptrace.h"
22 #include "signal.h"
23 
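/* Every signal except SIGKILL and SIGSTOP may be blocked. */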
24 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
25 
26 /*
27  * For ARM syscalls, we encode the syscall number into the instruction.
28  */
29 #define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn))
30 #define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn))
31 
32 /*
33  * For Thumb syscalls, we pass the syscall number via r7.  We therefore
34  * need two 16-bit instructions: a 'mov r7, #nr' followed by 'swi 0'.
35  */
36 #define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
37 #define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
38 
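/*
 * Indexed in setup_return() as idx = thumb, plus 2 for SA_SIGINFO
 * handlers: { ARM sigreturn, Thumb sigreturn, ARM rt_sigreturn,
 * Thumb rt_sigreturn }.
 */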
39 const unsigned long sigreturn_codes[4] = {
40 	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
41 	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
42 };
43 
44 static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
45 
46 /*
47  * atomically swap in the new signal mask, and wait for a signal.
48  */
49 asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
50 {
51 	sigset_t saveset;
52 
53 	mask &= _BLOCKABLE;
54 	spin_lock_irq(&current->sighand->siglock);
55 	saveset = current->blocked;
56 	siginitset(&current->blocked, mask);
57 	recalc_sigpending();
58 	spin_unlock_irq(&current->sighand->siglock);
59 	regs->ARM_r0 = -EINTR;
60 
61 	while (1) {
62 		current->state = TASK_INTERRUPTIBLE;
63 		schedule();
64 		if (do_signal(&saveset, regs, 0))
65 			return regs->ARM_r0;
66 	}
67 }
68 
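/*
 * Like sys_sigsuspend(), but the temporary mask is a full sigset_t
 * copied in from user space rather than an old_sigset_t in a register.
 */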
69 asmlinkage int
70 sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
71 {
72 	sigset_t saveset, newset;
73 
74 	/* XXX: Don't preclude handling different sized sigset_t's. */
75 	if (sigsetsize != sizeof(sigset_t))
76 		return -EINVAL;
77 
78 	if (copy_from_user(&newset, unewset, sizeof(newset)))
79 		return -EFAULT;
80 	sigdelsetmask(&newset, ~_BLOCKABLE);
81 
82 	spin_lock_irq(&current->sighand->siglock);
83 	saveset = current->blocked;
84 	current->blocked = newset;
85 	recalc_sigpending();
86 	spin_unlock_irq(&current->sighand->siglock);
87 	regs->ARM_r0 = -EINTR;
88 
89 	while (1) {
90 		current->state = TASK_INTERRUPTIBLE;
91 		schedule();
92 		if (do_signal(&saveset, regs, 0))
93 			return regs->ARM_r0;
94 	}
95 }
96 
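/*
 * Old-style sigaction: translate the user's struct old_sigaction into
 * a k_sigaction for do_sigaction(), and copy the old handler back out.
 */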
97 asmlinkage int
98 sys_sigaction(int sig, const struct old_sigaction __user *act,
99 	      struct old_sigaction __user *oact)
100 {
101 	struct k_sigaction new_ka, old_ka;
102 	int ret;
103 
104 	if (act) {
105 		old_sigset_t mask;
106 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
107 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
108 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
109 			return -EFAULT;
110 		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
111 		__get_user(mask, &act->sa_mask);
112 		siginitset(&new_ka.sa.sa_mask, mask);
113 	}
114 
115 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
116 
117 	if (!ret && oact) {
118 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
119 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
120 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
121 			return -EFAULT;
122 		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
123 		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
124 	}
125 
126 	return ret;
127 }
128 
129 #ifdef CONFIG_IWMMXT
130 
131 /* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
132 #define IWMMXT_STORAGE_SIZE	(0x98 + 8)
133 #define IWMMXT_MAGIC0		0x12ef842a
134 #define IWMMXT_MAGIC1		0x1c07ca71
135 
136 struct iwmmxt_sigframe {
137 	unsigned long	magic0;
138 	unsigned long	magic1;
139 	unsigned long	storage[0x98/4];
140 };
141 
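/*
 * Check, with mm->page_table_lock held by the caller, that the page
 * containing uptr is resident, and writable if wr is set.
 */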
142 static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
143 {
144 	unsigned long addr = (unsigned long)uptr;
145 	pgd_t *pgd = pgd_offset(mm, addr);
146 	if (pgd_present(*pgd)) {
147 		pmd_t *pmd = pmd_offset(pgd, addr);
148 		if (pmd_present(*pmd)) {
149 			pte_t *pte = pte_offset_map(pmd, addr);
150 			return (pte_present(*pte) && (!wr || pte_write(*pte)));
151 		}
152 	}
153 	return 0;
154 }
155 
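/*
 * Copy iWMMXt state to or from user space with copyfn(), which does no
 * access checking of its own.  Touch the first and last byte of the
 * user buffer to fault its pages in, then re-check under
 * mm->page_table_lock that both pages are still present before doing
 * the unchecked copy; loop if they disappeared in between.
 */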
156 static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
157 		       void (*copyfn)(void *, void __user *))
158 {
159 	unsigned char v, __user *userptr = uptr;
160 	int err = 0;
161 
162 	do {
163 		struct mm_struct *mm;
164 
165 		if (write) {
166 			__put_user_error(0, userptr, err);
167 			__put_user_error(0, userptr + size - 1, err);
168 		} else {
169 			__get_user_error(v, userptr, err);
170 			__get_user_error(v, userptr + size - 1, err);
171 		}
172 
173 		if (err)
174 			break;
175 
176 		mm = current->mm;
177 		spin_lock(&mm->page_table_lock);
178 		if (page_present(mm, userptr, write) &&
179 		    page_present(mm, userptr + size - 1, write)) {
180 			copyfn(kptr, uptr);
181 		} else
182 			err = 1;
183 		spin_unlock(&mm->page_table_lock);
184 	} while (err);
185 
186 	return err;
187 }
188 
189 static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
190 {
191 	int err = 0;
192 
193 	/* the iWMMXt context must be 64 bit aligned */
194 	WARN_ON((unsigned long)frame & 7);
195 
196 	__put_user_error(IWMMXT_MAGIC0, &frame->magic0, err);
197 	__put_user_error(IWMMXT_MAGIC1, &frame->magic1, err);
198 
199 	/*
200 	 * iwmmxt_task_copy() doesn't check user permissions.
201 	 * Let's do a dummy write on the upper boundary to ensure
202 	 * access to user mem is OK all the way up.
203 	 */
204 	err |= copy_locked(&frame->storage, current_thread_info(),
205 			   sizeof(frame->storage), 1, iwmmxt_task_copy);
206 	return err;
207 }
208 
209 static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
210 {
211 	unsigned long magic0, magic1;
212 	int err = 0;
213 
214 	/* the iWMMXt context is 64 bit aligned */
215 	WARN_ON((unsigned long)frame & 7);
216 
217 	/*
218 	 * Validate iWMMXt context signature.
219 	 * Also, iwmmxt_task_restore() doesn't check user permissions.
220 	 * Let's do a dummy write on the upper boundary to ensure
221 	 * access to user mem is OK all the way up.
222 	 */
223 	__get_user_error(magic0, &frame->magic0, err);
224 	__get_user_error(magic1, &frame->magic1, err);
225 	if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
226 		err = copy_locked(&frame->storage, current_thread_info(),
227 				  sizeof(frame->storage), 0, iwmmxt_task_restore);
228 	return err;
229 }
230 
231 #endif
232 
233 /*
234  * Auxiliary signal frame.  This saves stuff like FP state.
235  * The layout of this structure is not part of the user ABI.
236  */
237 struct aux_sigframe {
238 #ifdef CONFIG_IWMMXT
239 	struct iwmmxt_sigframe	iwmmxt;
240 #endif
241 #ifdef CONFIG_VFP
242 	union vfp_state		vfp;
243 #endif
244 };
245 
246 /*
247  * Do a signal return; undo the signal stack.  These frames are 64-bit aligned.
248  */
249 struct sigframe {
250 	struct sigcontext sc;
251 	unsigned long extramask[_NSIG_WORDS-1];
252 	unsigned long retcode;
253 	struct aux_sigframe aux __attribute__((aligned(8)));
254 };
255 
256 struct rt_sigframe {
257 	struct siginfo __user *pinfo;
258 	void __user *puc;
259 	struct siginfo info;
260 	struct ucontext uc;
261 	unsigned long retcode;
262 	struct aux_sigframe aux __attribute__((aligned(8)));
263 };
264 
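/*
 * Reload the user registers saved in the sigcontext, rejecting values
 * that are not valid for user mode, and restore any iWMMXt state from
 * the auxiliary frame.
 */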
265 static int
266 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
267 		   struct aux_sigframe __user *aux)
268 {
269 	int err = 0;
270 
271 	__get_user_error(regs->ARM_r0, &sc->arm_r0, err);
272 	__get_user_error(regs->ARM_r1, &sc->arm_r1, err);
273 	__get_user_error(regs->ARM_r2, &sc->arm_r2, err);
274 	__get_user_error(regs->ARM_r3, &sc->arm_r3, err);
275 	__get_user_error(regs->ARM_r4, &sc->arm_r4, err);
276 	__get_user_error(regs->ARM_r5, &sc->arm_r5, err);
277 	__get_user_error(regs->ARM_r6, &sc->arm_r6, err);
278 	__get_user_error(regs->ARM_r7, &sc->arm_r7, err);
279 	__get_user_error(regs->ARM_r8, &sc->arm_r8, err);
280 	__get_user_error(regs->ARM_r9, &sc->arm_r9, err);
281 	__get_user_error(regs->ARM_r10, &sc->arm_r10, err);
282 	__get_user_error(regs->ARM_fp, &sc->arm_fp, err);
283 	__get_user_error(regs->ARM_ip, &sc->arm_ip, err);
284 	__get_user_error(regs->ARM_sp, &sc->arm_sp, err);
285 	__get_user_error(regs->ARM_lr, &sc->arm_lr, err);
286 	__get_user_error(regs->ARM_pc, &sc->arm_pc, err);
287 	__get_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
288 
289 	err |= !valid_user_regs(regs);
290 
291 #ifdef CONFIG_IWMMXT
292 	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
293 		err |= restore_iwmmxt_context(&aux->iwmmxt);
294 #endif
295 #ifdef CONFIG_VFP
296 //	if (err == 0)
297 //		err |= vfp_restore_state(&aux->vfp);
298 #endif
299 
300 	return err;
301 }
302 
303 asmlinkage int sys_sigreturn(struct pt_regs *regs)
304 {
305 	struct sigframe __user *frame;
306 	sigset_t set;
307 
308 	/* Always make any pending restarted system calls return -EINTR */
309 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
310 
311 	/*
312 	 * Since we stacked the signal on a 64-bit boundary,
313 	 * then 'sp' should be 64-bit aligned here.  If it's
314 	 * not, then the user is trying to mess with us.
315 	 */
316 	if (regs->ARM_sp & 7)
317 		goto badframe;
318 
319 	frame = (struct sigframe __user *)regs->ARM_sp;
320 
321 	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
322 		goto badframe;
323 	if (__get_user(set.sig[0], &frame->sc.oldmask)
324 	    || (_NSIG_WORDS > 1
325 	        && __copy_from_user(&set.sig[1], &frame->extramask,
326 				    sizeof(frame->extramask))))
327 		goto badframe;
328 
329 	sigdelsetmask(&set, ~_BLOCKABLE);
330 	spin_lock_irq(&current->sighand->siglock);
331 	current->blocked = set;
332 	recalc_sigpending();
333 	spin_unlock_irq(&current->sighand->siglock);
334 
335 	if (restore_sigcontext(regs, &frame->sc, &frame->aux))
336 		goto badframe;
337 
338 	/* Send SIGTRAP if we're single-stepping */
339 	if (current->ptrace & PT_SINGLESTEP) {
340 		ptrace_cancel_bpt(current);
341 		send_sig(SIGTRAP, current, 1);
342 	}
343 
344 	return regs->ARM_r0;
345 
346 badframe:
347 	force_sig(SIGSEGV, current);
348 	return 0;
349 }
350 
351 asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
352 {
353 	struct rt_sigframe __user *frame;
354 	sigset_t set;
355 
356 	/* Always make any pending restarted system calls return -EINTR */
357 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
358 
359 	/*
360 	 * Since we stacked the signal on a 64-bit boundary,
361 	 * then 'sp' should be 64-bit aligned here.  If it's
362 	 * not, then the user is trying to mess with us.
363 	 */
364 	if (regs->ARM_sp & 7)
365 		goto badframe;
366 
367 	frame = (struct rt_sigframe __user *)regs->ARM_sp;
368 
369 	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
370 		goto badframe;
371 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
372 		goto badframe;
373 
374 	sigdelsetmask(&set, ~_BLOCKABLE);
375 	spin_lock_irq(&current->sighand->siglock);
376 	current->blocked = set;
377 	recalc_sigpending();
378 	spin_unlock_irq(&current->sighand->siglock);
379 
380 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &frame->aux))
381 		goto badframe;
382 
383 	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
384 		goto badframe;
385 
386 	/* Send SIGTRAP if we're single-stepping */
387 	if (current->ptrace & PT_SINGLESTEP) {
388 		ptrace_cancel_bpt(current);
389 		send_sig(SIGTRAP, current, 1);
390 	}
391 
392 	return regs->ARM_r0;
393 
394 badframe:
395 	force_sig(SIGSEGV, current);
396 	return 0;
397 }
398 
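/*
 * Save the user registers, the fault details and the old signal mask
 * into the sigcontext, and any iWMMXt state into the auxiliary frame.
 */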
399 static int
400 setup_sigcontext(struct sigcontext __user *sc, struct aux_sigframe __user *aux,
401 		 struct pt_regs *regs, unsigned long mask)
402 {
403 	int err = 0;
404 
405 	__put_user_error(regs->ARM_r0, &sc->arm_r0, err);
406 	__put_user_error(regs->ARM_r1, &sc->arm_r1, err);
407 	__put_user_error(regs->ARM_r2, &sc->arm_r2, err);
408 	__put_user_error(regs->ARM_r3, &sc->arm_r3, err);
409 	__put_user_error(regs->ARM_r4, &sc->arm_r4, err);
410 	__put_user_error(regs->ARM_r5, &sc->arm_r5, err);
411 	__put_user_error(regs->ARM_r6, &sc->arm_r6, err);
412 	__put_user_error(regs->ARM_r7, &sc->arm_r7, err);
413 	__put_user_error(regs->ARM_r8, &sc->arm_r8, err);
414 	__put_user_error(regs->ARM_r9, &sc->arm_r9, err);
415 	__put_user_error(regs->ARM_r10, &sc->arm_r10, err);
416 	__put_user_error(regs->ARM_fp, &sc->arm_fp, err);
417 	__put_user_error(regs->ARM_ip, &sc->arm_ip, err);
418 	__put_user_error(regs->ARM_sp, &sc->arm_sp, err);
419 	__put_user_error(regs->ARM_lr, &sc->arm_lr, err);
420 	__put_user_error(regs->ARM_pc, &sc->arm_pc, err);
421 	__put_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
422 
423 	__put_user_error(current->thread.trap_no, &sc->trap_no, err);
424 	__put_user_error(current->thread.error_code, &sc->error_code, err);
425 	__put_user_error(current->thread.address, &sc->fault_address, err);
426 	__put_user_error(mask, &sc->oldmask, err);
427 
428 #ifdef CONFIG_IWMMXT
429 	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
430 		err |= preserve_iwmmxt_context(&aux->iwmmxt);
431 #endif
432 #ifdef CONFIG_VFP
433 //	if (err == 0)
434 //		err |= vfp_save_state(&aux->vfp);
435 #endif
436 
437 	return err;
438 }
439 
440 static inline void __user *
441 get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
442 {
443 	unsigned long sp = regs->ARM_sp;
444 	void __user *frame;
445 
446 	/*
447 	 * This is the X/Open sanctioned signal stack switching.
448 	 */
449 	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
450 		sp = current->sas_ss_sp + current->sas_ss_size;
451 
452 	/*
453 	 * ATPCS B01 mandates 8-byte alignment
454 	 */
455 	frame = (void __user *)((sp - framesize) & ~7);
456 
457 	/*
458 	 * Check that we can actually write to the signal frame.
459 	 */
460 	if (!access_ok(VERIFY_WRITE, frame, framesize))
461 		frame = NULL;
462 
463 	return frame;
464 }
465 
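/*
 * Arrange for the handler to be entered: r0 = translated signal
 * number, sp = the new frame, pc = the handler and lr = the return
 * trampoline (sa_restorer if given, the kernel's high-page return code
 * at KERN_SIGRETURN_CODE for 32-bit tasks, otherwise code written to
 * the frame itself), switching CPSR between ARM and Thumb as the
 * handler's address requires.
 */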
466 static int
467 setup_return(struct pt_regs *regs, struct k_sigaction *ka,
468 	     unsigned long __user *rc, void __user *frame, int usig)
469 {
470 	unsigned long handler = (unsigned long)ka->sa.sa_handler;
471 	unsigned long retcode;
472 	int thumb = 0;
473 	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
474 
475 	/*
476 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
477 	 */
478 	if (ka->sa.sa_flags & SA_THIRTYTWO)
479 		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
480 
481 #ifdef CONFIG_ARM_THUMB
482 	if (elf_hwcap & HWCAP_THUMB) {
483 		/*
484 		 * The LSB of the handler determines if we're going to
485 		 * be using THUMB or ARM mode for this signal handler.
486 		 */
487 		thumb = handler & 1;
488 
489 		if (thumb)
490 			cpsr |= PSR_T_BIT;
491 		else
492 			cpsr &= ~PSR_T_BIT;
493 	}
494 #endif
495 
496 	if (ka->sa.sa_flags & SA_RESTORER) {
497 		retcode = (unsigned long)ka->sa.sa_restorer;
498 	} else {
499 		unsigned int idx = thumb;
500 
501 		if (ka->sa.sa_flags & SA_SIGINFO)
502 			idx += 2;
503 
504 		if (__put_user(sigreturn_codes[idx], rc))
505 			return 1;
506 
507 		if (cpsr & MODE32_BIT) {
508 			/*
509 			 * 32-bit code can use the new high-page
510 			 * signal return code support.
511 			 */
512 			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
513 		} else {
514 			/*
515 			 * Ensure that the instruction cache sees
516 			 * the return code written onto the stack.
517 			 */
518 			flush_icache_range((unsigned long)rc,
519 					   (unsigned long)(rc + 1));
520 
521 			retcode = ((unsigned long)rc) + thumb;
522 		}
523 	}
524 
525 	regs->ARM_r0 = usig;
526 	regs->ARM_sp = (unsigned long)frame;
527 	regs->ARM_lr = retcode;
528 	regs->ARM_pc = handler;
529 	regs->ARM_cpsr = cpsr;
530 
531 	return 0;
532 }
533 
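/*
 * Build the frame for an old-style (non-RT) signal: the sigcontext
 * plus any extra mask words, then let setup_return() redirect the
 * registers at the handler.
 */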
534 static int
535 setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
536 {
537 	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
538 	int err = 0;
539 
540 	if (!frame)
541 		return 1;
542 
543 	err |= setup_sigcontext(&frame->sc, &frame->aux, regs, set->sig[0]);
544 
545 	if (_NSIG_WORDS > 1) {
546 		err |= __copy_to_user(frame->extramask, &set->sig[1],
547 				      sizeof(frame->extramask));
548 	}
549 
550 	if (err == 0)
551 		err = setup_return(regs, ka, &frame->retcode, frame, usig);
552 
553 	return err;
554 }
555 
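/*
 * Build the frame for an RT signal: siginfo, ucontext (with the
 * current sigaltstack settings and the full blocked mask) and the
 * sigcontext.  r1/r2 are pointed at the info and ucontext afterwards
 * so the handler sees them as its second and third arguments.
 */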
556 static int
557 setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
558 	       sigset_t *set, struct pt_regs *regs)
559 {
560 	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
561 	stack_t stack;
562 	int err = 0;
563 
564 	if (!frame)
565 		return 1;
566 
567 	__put_user_error(&frame->info, &frame->pinfo, err);
568 	__put_user_error(&frame->uc, &frame->puc, err);
569 	err |= copy_siginfo_to_user(&frame->info, info);
570 
571 	__put_user_error(0, &frame->uc.uc_flags, err);
572 	__put_user_error(NULL, &frame->uc.uc_link, err);
573 
574 	memset(&stack, 0, sizeof(stack));
575 	stack.ss_sp = (void __user *)current->sas_ss_sp;
576 	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
577 	stack.ss_size = current->sas_ss_size;
578 	err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
579 
580 	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->aux,
581 				regs, set->sig[0]);
582 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
583 
584 	if (err == 0)
585 		err = setup_return(regs, ka, &frame->retcode, frame, usig);
586 
587 	if (err == 0) {
588 		/*
589 		 * For realtime signals we must also set the second and third
590 		 * arguments for the signal handler.
591 		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
592 		 */
593 		regs->ARM_r1 = (unsigned long)&frame->info;
594 		regs->ARM_r2 = (unsigned long)&frame->uc;
595 	}
596 
597 	return err;
598 }
599 
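/*
 * Rewind a system call so it is reissued: restore the original r0 and
 * back the PC up over the SWI instruction (2 bytes in Thumb mode,
 * 4 bytes in ARM mode).
 */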
600 static inline void restart_syscall(struct pt_regs *regs)
601 {
602 	regs->ARM_r0 = regs->ARM_ORIG_r0;
603 	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
604 }
605 
606 /*
607  * OK, we're invoking a handler
608  */
609 static void
610 handle_signal(unsigned long sig, struct k_sigaction *ka,
611 	      siginfo_t *info, sigset_t *oldset,
612 	      struct pt_regs * regs, int syscall)
613 {
614 	struct thread_info *thread = current_thread_info();
615 	struct task_struct *tsk = current;
616 	int usig = sig;
617 	int ret;
618 
619 	/*
620 	 * If we came from a system call, check for system call restarting...
621 	 */
622 	if (syscall) {
623 		switch (regs->ARM_r0) {
624 		case -ERESTART_RESTARTBLOCK:
625 		case -ERESTARTNOHAND:
626 			regs->ARM_r0 = -EINTR;
627 			break;
628 		case -ERESTARTSYS:
629 			if (!(ka->sa.sa_flags & SA_RESTART)) {
630 				regs->ARM_r0 = -EINTR;
631 				break;
632 			}
633 			/* fallthrough */
634 		case -ERESTARTNOINTR:
635 			restart_syscall(regs);
636 		}
637 	}
638 
639 	/*
640 	 * translate the signal
641 	 */
642 	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
643 		usig = thread->exec_domain->signal_invmap[usig];
644 
645 	/*
646 	 * Set up the stack frame
647 	 */
648 	if (ka->sa.sa_flags & SA_SIGINFO)
649 		ret = setup_rt_frame(usig, ka, info, oldset, regs);
650 	else
651 		ret = setup_frame(usig, ka, oldset, regs);
652 
653 	/*
654 	 * Check that the resulting registers are actually sane.
655 	 */
656 	ret |= !valid_user_regs(regs);
657 
658 	/*
659 	 * Block the signal if setup failed, or if SA_NODEFER is not set.
660 	 */
661 	if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
662 		spin_lock_irq(&tsk->sighand->siglock);
663 		sigorsets(&tsk->blocked, &tsk->blocked,
664 			  &ka->sa.sa_mask);
665 		sigaddset(&tsk->blocked, sig);
666 		recalc_sigpending();
667 		spin_unlock_irq(&tsk->sighand->siglock);
668 	}
669 
670 	if (ret == 0)
671 		return;
672 
673 	force_sigsegv(sig, tsk);
674 }
675 
676 /*
677  * Note that 'init' is a special process: it doesn't get signals it doesn't
678  * want to handle. Thus you cannot kill init, even with a SIGKILL, even by
679  * mistake.
680  *
681  * Note that we go through the signals twice: once to check the signals that
682  * the kernel can handle, and then we build all the user-level signal handling
683  * stack-frames in one go after that.
684  */
685 static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
686 {
687 	struct k_sigaction ka;
688 	siginfo_t info;
689 	int signr;
690 
691 	/*
692 	 * We want the common case to go fast, which
693 	 * is why we may in certain cases get here from
694 	 * kernel mode. Just return without doing anything
695 	 * if so.
696 	 */
697 	if (!user_mode(regs))
698 		return 0;
699 
700 	if (try_to_freeze())
701 		goto no_signal;
702 
703 	if (current->ptrace & PT_SINGLESTEP)
704 		ptrace_cancel_bpt(current);
705 
706 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
707 	if (signr > 0) {
708 		handle_signal(signr, &ka, &info, oldset, regs, syscall);
709 		if (current->ptrace & PT_SINGLESTEP)
710 			ptrace_set_bpt(current);
711 		return 1;
712 	}
713 
714  no_signal:
715 	/*
716 	 * No signal to deliver to the process - restart the syscall if needed.
717 	 */
718 	if (syscall) {
719 		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
720 			if (thumb_mode(regs)) {
721 				regs->ARM_r7 = __NR_restart_syscall;
722 				regs->ARM_pc -= 2;
723 			} else {
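				/*
				 * In ARM mode the syscall number is encoded in the
				 * SWI instruction itself, so r7 cannot simply be
				 * reloaded as above.  Instead, push a small
				 * trampoline onto the user stack (the original PC,
				 * a "swi __NR_restart_syscall" and a
				 * "ldr pc, [sp], #12" that undoes it all) and
				 * resume at the swi.
				 */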
724 				u32 __user *usp;
725 
726 				regs->ARM_sp -= 12;
727 				usp = (u32 __user *)regs->ARM_sp;
728 
729 				put_user(regs->ARM_pc, &usp[0]);
730 				/* swi __NR_restart_syscall */
731 				put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
732 				/* ldr	pc, [sp], #12 */
733 				put_user(0xe49df00c, &usp[2]);
734 
735 				flush_icache_range((unsigned long)usp,
736 						   (unsigned long)(usp + 3));
737 
738 				regs->ARM_pc = regs->ARM_sp + 4;
739 			}
740 		}
741 		if (regs->ARM_r0 == -ERESTARTNOHAND ||
742 		    regs->ARM_r0 == -ERESTARTSYS ||
743 		    regs->ARM_r0 == -ERESTARTNOINTR) {
744 			restart_syscall(regs);
745 		}
746 	}
747 	if (current->ptrace & PT_SINGLESTEP)
748 		ptrace_set_bpt(current);
749 	return 0;
750 }
751 
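/*
 * Called on the way back out to user space when thread work flags are
 * set; deliver any pending signals against the current blocked mask.
 */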
752 asmlinkage void
753 do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
754 {
755 	if (thread_flags & _TIF_SIGPENDING)
756 		do_signal(&current->blocked, regs, syscall);
757 }
758