/*
 * linux/arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RESTART		(0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};

/*
 * Either we support OABI only, or we have EABI with the OABI
 * compat layer enabled.  In the latter case we don't know if
 * user space is EABI or not, and if not we must not clobber r7.
 * Always using the OABI syscall solves that issue and works for
 * all those cases.
 */
const unsigned long syscall_restart_code[2] = {
	SWI_SYS_RESTART,	/* swi	__NR_restart_syscall */
	0xe49df004,		/* ldr	pc, [sp], #4 */
};

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}

asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
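 *
 * The structures below mirror what setup_frame()/setup_rt_frame() push
 * onto the user stack: the saved ucontext (register state in uc_mcontext,
 * plus any CRUNCH/iWMMXt/VFP state in uc_regspace) followed by the
 * sigreturn trampoline in retcode[]; rt frames prepend the siginfo.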
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}

static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
	unsigned long sp = regs->ARM_sp;
	void __user *frame;

	/*
	 * This is the X/Open sanctioned signal stack switching.
	 */
	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
		sp = current->sas_ss_sp + current->sas_ss_size;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
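	 * If we can't, the caller gets NULL back and will abandon
	 * delivery, which ultimately forces a SIGSEGV in handle_signal().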
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}

static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx], rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}

static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}

static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *    -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	sigset_t *oldset = sigmask_to_save();
	int usig = sig;
	int ret;

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return;
	}
	signal_delivered(sig, info, ka, regs, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		case -ERESTART_RESTARTBLOCK:
			regs->ARM_r0 = -EINTR;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/*
		 * Depending on the signal settings we may need to revert the
		 * decision to restart the system call.  But skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND
			    || (retval == -ERESTARTSYS
				&& !(ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}

		handle_signal(signr, &ka, &info, regs);
		return;
	}

	if (syscall) {
		/*
		 * Handle restarting a different system call.  As above,
		 * if a debugger has chosen to restart at a different PC,
		 * ignore the restart.
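		 *
		 * Restarting here means re-issuing sys_restart_syscall:
		 * for Thumb (and EABI-only kernels) by reloading r7 and
		 * stepping the PC back over the svc/swi, otherwise by
		 * bouncing through the OABI restart code stub.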
		 */
		if (retval == -ERESTART_RESTARTBLOCK
		    && regs->ARM_pc == continue_addr) {
			if (thumb_mode(regs)) {
				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
				regs->ARM_pc -= 2;
			} else {
#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
				regs->ARM_r7 = __NR_restart_syscall;
				regs->ARM_pc -= 4;
#else
				u32 __user *usp;

				regs->ARM_sp -= 4;
				usp = (u32 __user *)regs->ARM_sp;

				if (put_user(regs->ARM_pc, usp) == 0) {
					regs->ARM_pc = KERN_RESTART_CODE;
				} else {
					regs->ARM_sp += 4;
					force_sigsegv(0, current);
				}
#endif
			}
		}
	}

	restore_saved_sigmask();
}

asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	if (thread_flags & _TIF_SIGPENDING)
		do_signal(regs, syscall);

	if (thread_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
}