xref: /linux/arch/arm/kernel/signal.c (revision 4413e16d9d21673bb5048a2e542f1aaa00015c2e)
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
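/*
 * For illustration: setup_return() below indexes this table with
 * idx = (thumb << 1), plus 3 for SA_SIGINFO handlers, and copies the two
 * words at [idx] and [idx + 1] onto the user stack when no SA_RESTORER
 * was supplied.  For an ARM-state handler of a non-RT signal that yields
 * the trampoline
 *
 *	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
 *	swi	#(__NR_sigreturn | __NR_OABI_SYSCALL_BASE)
 *
 * while a Thumb-state handler executes the two 16-bit instructions packed
 * into a single SWI_THUMB_* word (mov r7, #nr; swi 0), so the second word
 * copied for a Thumb handler is never executed.  The seventh,
 * zero-initialised entry merely keeps the [idx + 1] access in bounds for
 * the Thumb/RT case.
 */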

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}

asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
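/*
 * For illustration: the Crunch helpers above (and the iWMMXt helpers
 * below) build the coprocessor frame in an oversized on-stack buffer and
 * round the pointer with ((unsigned long)(kbuf + 8) & ~7), which always
 * yields a 64-bit aligned address inside kbuf no matter how the buffer
 * itself is placed; the extra 8 bytes in kbuf pay for that slack.  If
 * kbuf happened to end in ...4, the frame would be built at ...8, and a
 * single __copy_to_user()/__copy_from_user() then moves the whole frame.
 */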

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif
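/*
 * For illustration: unlike the Crunch/iWMMXt helpers above, the VFP
 * helpers do not bounce through an aligned kernel buffer.  They access
 * the user frame directly: the magic/size header moves via
 * __put_user_error()/__get_user_error(), and the ufp/ufp_exc user
 * pointers are handed to the vfp_*_hwstate() routines, which perform
 * their own user accesses and return nonzero on failure.
 */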

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};
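/*
 * For illustration: at handler entry the user's sp points at one of these
 * frames, so the stack looks roughly like
 *
 *	sp -> [ struct siginfo  ]  rt frames only
 *	      [ struct ucontext ]  sigmask, mcontext and uc_regspace, which
 *	                           carries the aux (Crunch/iWMMXt/VFP) state
 *	      [ retcode[2]      ]  sigreturn trampoline when no SA_RESTORER
 *
 * sys_sigreturn()/sys_rt_sigreturn() rebuild pt_regs from the same layout,
 * which is why the rt frame embeds a complete struct sigframe.
 */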

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * 'sp' should be 64-bit aligned here.  If it's not,
	 * then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * 'sp' should be 64-bit aligned here.  If it's not,
	 * then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}

static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
	unsigned long sp = regs->ARM_sp;
	void __user *frame;

	/*
	 * This is the X/Open sanctioned signal stack switching.
	 */
	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
		sp = current->sas_ss_sp + current->sas_ss_size;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}
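/*
 * For illustration (addresses invented): with sp == 0xbefff0e4 and
 * framesize == 0x2a8 the subtraction gives 0xbeffee3c, and the & ~7
 * rounds that down to 0xbeffee38, satisfying the 8-byte alignment that
 * both ATPCS and the sp & 7 checks in the sigreturn paths expect.
 */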

static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
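/*
 * For illustration: when setup_return() succeeds, the handler is entered
 * with
 *
 *	r0  = usig        translated signal number
 *	sp  = frame       base of the signal frame built by the caller
 *	lr  = retcode     sa_restorer, the kernel-provided copy at
 *	                  KERN_SIGRETURN_CODE, or the on-stack trampoline
 *	pc  = sa_handler  with the CPSR Thumb bit matching the handler's LSB
 *
 * so that returning from the handler runs the sigreturn trampoline.
 * setup_rt_frame() additionally loads r1/r2; see below.
 */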

static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}

static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}
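/*
 * For illustration: the r1/r2 values above line up with the three-argument
 * handler that user space registers with SA_SIGINFO, roughly (SIGUSR1 is
 * only an example signal, not something this file mandates):
 *
 *	void handler(int sig, siginfo_t *info, void *ucontext);
 *
 *	struct sigaction sa = {
 *		.sa_sigaction	= handler,
 *		.sa_flags	= SA_SIGINFO,
 *	};
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * The ucontext argument ends up pointing at the &frame->sig.uc written above.
 */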

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	sigset_t *oldset = sigmask_to_save();
	int usig = sig;
	int ret;

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return;
	}
	signal_delivered(sig, info, ka, regs, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct k_sigaction ka;
	siginfo_t info;
	int signr;
	int restart = 0;

	/*
	 * If we came from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PC and r0.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
			/* fall through */
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	/*
	 * Depending on the signal settings, we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (regs->ARM_pc != restart_addr)
		restart = 0;
	if (signr > 0) {
		if (unlikely(restart)) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}

		handle_signal(signr, &ka, &info, regs);
		return 0;
	}

	restore_saved_sigmask();
	if (unlikely(restart))
		regs->ARM_pc = continue_addr;
	return restart;
}

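/*
 * For illustration: do_signal() returns 0 once a handler has been set up
 * or no restart is needed, 1 when a syscall that returned -ERESTARTSYS,
 * -ERESTARTNOHAND or -ERESTARTNOINTR should simply be re-issued, and -1
 * for -ERESTART_RESTARTBLOCK (the restart -= 2 / restart++ sequence
 * above).  do_work_pending() passes that value straight back to the
 * assembly entry code, which is expected to restart the syscall without
 * returning to user space; how the two non-zero cases differ is handled
 * there, not in this file.
 */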
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}