/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cache.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>

#include "signal-common.h"

static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

struct sigframe {
        u32 sf_ass[4];          /* argument save space for o32 */
        u32 sf_pad[2];          /* Was: signal trampoline */
        struct sigcontext sf_sc;
        sigset_t sf_mask;
};

struct rt_sigframe {
        u32 rs_ass[4];          /* argument save space for o32 */
        u32 rs_pad[2];          /* Was: signal trampoline */
        struct siginfo rs_info;
        struct ucontext rs_uc;
};

/*
 * Helper routines
 */
static int protected_save_fp_context(struct sigcontext __user *sc)
{
        int err;

        while (1) {
                lock_fpu_owner();
                own_fpu_inatomic(1);
                err = save_fp_context(sc); /* this might fail */
                unlock_fpu_owner();
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
                err = __put_user(0, &sc->sc_fpregs[0]) |
                        __put_user(0, &sc->sc_fpregs[31]) |
                        __put_user(0, &sc->sc_fpc_csr);
                if (err)
                        break;  /* really bad sigcontext */
        }
        return err;
}

static int protected_restore_fp_context(struct sigcontext __user *sc)
{
        int err, tmp __maybe_unused;

        while (1) {
                lock_fpu_owner();
                own_fpu_inatomic(0);
                err = restore_fp_context(sc); /* this might fail */
                unlock_fpu_owner();
                if (likely(!err))
                        break;
                /* touch the sigcontext and try again */
                err = __get_user(tmp, &sc->sc_fpregs[0]) |
                        __get_user(tmp, &sc->sc_fpregs[31]) |
                        __get_user(tmp, &sc->sc_fpc_csr);
                if (err)
                        break;  /* really bad sigcontext */
        }
        return err;
}

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
        int err = 0;
        int i;
        unsigned int used_math;

        err |= __put_user(regs->cp0_epc, &sc->sc_pc);

        err |= __put_user(0, &sc->sc_regs[0]);
        for (i = 1; i < 32; i++)
                err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        err |= __put_user(regs->acx, &sc->sc_acx);
#endif
        err |= __put_user(regs->hi, &sc->sc_mdhi);
        err |= __put_user(regs->lo, &sc->sc_mdlo);
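        /*
         * The DSP ASE adds three extra hi/lo accumulator pairs plus a
         * control register, so save those alongside the standard hi/lo
         * to give the handler (and sigreturn) the complete state.
         */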
        if (cpu_has_dsp) {
                err |= __put_user(mfhi1(), &sc->sc_hi1);
                err |= __put_user(mflo1(), &sc->sc_lo1);
                err |= __put_user(mfhi2(), &sc->sc_hi2);
                err |= __put_user(mflo2(), &sc->sc_lo2);
                err |= __put_user(mfhi3(), &sc->sc_hi3);
                err |= __put_user(mflo3(), &sc->sc_lo3);
                err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
        }

        used_math = !!used_math();
        err |= __put_user(used_math, &sc->sc_used_math);

        if (used_math) {
                /*
                 * Save FPU state to signal context. Signal handler
                 * will "inherit" current FPU state.
                 */
                err |= protected_save_fp_context(sc);
        }
        return err;
}

int fpcsr_pending(unsigned int __user *fpcsr)
{
        int err, sig = 0;
        unsigned int csr, enabled;

        err = __get_user(csr, fpcsr);
        enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
        /*
         * If the signal handler set some FPU exceptions, clear it and
         * send SIGFPE.
         */
        if (csr & enabled) {
                csr &= ~enabled;
                err |= __put_user(csr, fpcsr);
                sig = SIGFPE;
        }
        return err ?: sig;
}

static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
        int err, sig;

        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
        err |= protected_restore_fp_context(sc);
        return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
        unsigned int used_math;
        unsigned long treg;
        int err = 0;
        int i;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        err |= __get_user(regs->acx, &sc->sc_acx);
#endif
        err |= __get_user(regs->hi, &sc->sc_mdhi);
        err |= __get_user(regs->lo, &sc->sc_mdlo);
        if (cpu_has_dsp) {
                err |= __get_user(treg, &sc->sc_hi1);  mthi1(treg);
                err |= __get_user(treg, &sc->sc_lo1);  mtlo1(treg);
                err |= __get_user(treg, &sc->sc_hi2);  mthi2(treg);
                err |= __get_user(treg, &sc->sc_lo2);  mtlo2(treg);
                err |= __get_user(treg, &sc->sc_hi3);  mthi3(treg);
                err |= __get_user(treg, &sc->sc_lo3);  mtlo3(treg);
                err |= __get_user(treg, &sc->sc_dsp);  wrdsp(treg, DSP_MASK);
        }

        for (i = 1; i < 32; i++)
                err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

        err |= __get_user(used_math, &sc->sc_used_math);
        conditional_used_math(used_math);

        if (used_math) {
                /* restore fpu context if we have used it before */
                if (!err)
                        err = check_and_restore_fp_context(sc);
        } else {
                /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
        }

        return err;
}

void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
                          size_t frame_size)
{
        unsigned long sp;

        /* Default to using normal stack */
        sp = regs->regs[29];

        /*
         * FPU emulator may have its own trampoline active just
         * above the user stack, 16-bytes before the next lowest
         * 16 byte boundary.  Try to avoid trashing it.
         */
        sp -= 32;

        /* This is the X/Open sanctioned signal stack switching.  */
        if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
                sp = current->sas_ss_sp + current->sas_ss_size;

        return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
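 *
 * Both variants below save the caller's mask in current->saved_sigmask,
 * install the requested mask, sleep until a signal arrives, and return
 * -ERESTARTNOHAND with TIF_RESTORE_SIGMASK set, so the original mask is
 * put back after the handler's frame has been set up (or by do_signal()
 * if no handler ends up running).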
 */

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
        sigset_t newset;
        sigset_t __user *uset;

        uset = (sigset_t __user *) regs.regs[4];
        if (copy_from_user(&newset, uset, sizeof(sigset_t)))
                return -EFAULT;
        sigdelsetmask(&newset, ~_BLOCKABLE);

        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_thread_flag(TIF_RESTORE_SIGMASK);
        return -ERESTARTNOHAND;
}
#endif

asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
        sigset_t newset;
        sigset_t __user *unewset;
        size_t sigsetsize;

        /* XXX Don't preclude handling different sized sigset_t's.  */
        sigsetsize = regs.regs[5];
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        unewset = (sigset_t __user *) regs.regs[4];
        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
        sigdelsetmask(&newset, ~_BLOCKABLE);

        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_thread_flag(TIF_RESTORE_SIGMASK);
        return -ERESTARTNOHAND;
}

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
        struct sigaction __user *, oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;
        int err = 0;

        if (act) {
                old_sigset_t mask;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)))
                        return -EFAULT;
                err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
                err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                err |= __get_user(mask, &act->sa_mask.sig[0]);
                if (err)
                        return -EFAULT;

                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
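
        /*
         * Copy the previous disposition back to userspace.  Only the
         * first word of sa_mask is meaningful for the old sigaction
         * call, so the remaining words are cleared explicitly.
         */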
        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
                        return -EFAULT;
                err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
                err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
                err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
                err |= __put_user(0, &oact->sa_mask.sig[1]);
                err |= __put_user(0, &oact->sa_mask.sig[2]);
                err |= __put_user(0, &oact->sa_mask.sig[3]);
                if (err)
                        return -EFAULT;
        }

        return ret;
}
#endif

asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
        const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
        stack_t __user *uoss = (stack_t __user *) regs.regs[5];
        unsigned long usp = regs.regs[29];

        return do_sigaltstack(uss, uoss, usp);
}

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
        struct sigframe __user *frame;
        sigset_t blocked;
        int sig;

        frame = (struct sigframe __user *) regs.regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
                goto badframe;

        sigdelsetmask(&blocked, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = blocked;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        sig = restore_sigcontext(&regs, &frame->sf_sc);
        if (sig < 0)
                goto badframe;
        else if (sig)
                force_sig(sig, current);

        /*
         * Don't let your children do this ...
         */
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
                :/* no outputs */
                :"r" (&regs));
        /* Unreached */

badframe:
        force_sig(SIGSEGV, current);
}
#endif  /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
        struct rt_sigframe __user *frame;
        sigset_t set;
        int sig;

        frame = (struct rt_sigframe __user *) regs.regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
        if (sig < 0)
                goto badframe;
        else if (sig)
                force_sig(sig, current);

        /* It is more difficult to avoid calling this function than to
           call it and ignore errors.  */
        do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);

        /*
         * Don't let your children do this ...
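         *
         * $29 is pointed back at the saved pt_regs and control jumps to
         * syscall_exit, so the restored user context is reloaded on the
         * way back to user mode; this function never returns normally.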
         */
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
                :/* no outputs */
                :"r" (&regs));
        /* Unreached */

badframe:
        force_sig(SIGSEGV, current);
}

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct k_sigaction *ka,
                       struct pt_regs *regs, int signr, sigset_t *set)
{
        struct sigframe __user *frame;
        int err = 0;

        frame = get_sigframe(ka, regs, sizeof(*frame));
        if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
                goto give_sigsegv;

        err |= setup_sigcontext(regs, &frame->sf_sc);
        err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
        if (err)
                goto give_sigsegv;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = 0 (should be cause)
         *   a2 = pointer to struct sigcontext
         *
         * $25 and c0_epc point to the signal handler, $29 points to the
         * struct sigframe.
         */
        regs->regs[ 4] = signr;
        regs->regs[ 5] = 0;
        regs->regs[ 6] = (unsigned long) &frame->sf_sc;
        regs->regs[29] = (unsigned long) frame;
        regs->regs[31] = (unsigned long) sig_return;
        regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

        DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
               current->comm, current->pid,
               frame, regs->cp0_epc, regs->regs[31]);
        return 0;

give_sigsegv:
        force_sigsegv(signr, current);
        return -EFAULT;
}
#endif

static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
                          struct pt_regs *regs, int signr, sigset_t *set,
                          siginfo_t *info)
{
        struct rt_sigframe __user *frame;
        int err = 0;

        frame = get_sigframe(ka, regs, sizeof(*frame));
        if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
                goto give_sigsegv;

        /* Create siginfo.  */
        err |= copy_siginfo_to_user(&frame->rs_info, info);

        /* Create the ucontext.  */
        err |= __put_user(0, &frame->rs_uc.uc_flags);
        err |= __put_user(NULL, &frame->rs_uc.uc_link);
        err |= __put_user((void __user *)current->sas_ss_sp,
                          &frame->rs_uc.uc_stack.ss_sp);
        err |= __put_user(sas_ss_flags(regs->regs[29]),
                          &frame->rs_uc.uc_stack.ss_flags);
        err |= __put_user(current->sas_ss_size,
                          &frame->rs_uc.uc_stack.ss_size);
        err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
        err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

        if (err)
                goto give_sigsegv;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = 0 (should be cause)
         *   a2 = pointer to ucontext
         *
         * $25 and c0_epc point to the signal handler, $29 points to
         * the struct rt_sigframe.
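         *
         * $31 (ra) is pointed at the signal return trampoline in the
         * vDSO (sig_return), so the handler returns through
         * rt_sigreturn when it is done.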
         */
        regs->regs[ 4] = signr;
        regs->regs[ 5] = (unsigned long) &frame->rs_info;
        regs->regs[ 6] = (unsigned long) &frame->rs_uc;
        regs->regs[29] = (unsigned long) frame;
        regs->regs[31] = (unsigned long) sig_return;
        regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

        DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
               current->comm, current->pid,
               frame, regs->cp0_epc, regs->regs[31]);

        return 0;

give_sigsegv:
        force_sigsegv(signr, current);
        return -EFAULT;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
        .setup_frame    = setup_frame,
        .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
        .setup_rt_frame = setup_rt_frame,
        .rt_signal_return_offset =
                offsetof(struct mips_vdso, rt_signal_trampoline),
        .restart        = __NR_restart_syscall
};

static int handle_signal(unsigned long sig, siginfo_t *info,
        struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
        int ret;
        struct mips_abi *abi = current->thread.abi;
        void *vdso = current->mm->context.vdso;

        if (regs->regs[0]) {
                switch(regs->regs[2]) {
                case ERESTART_RESTARTBLOCK:
                case ERESTARTNOHAND:
                        regs->regs[2] = EINTR;
                        break;
                case ERESTARTSYS:
                        if (!(ka->sa.sa_flags & SA_RESTART)) {
                                regs->regs[2] = EINTR;
                                break;
                        }
                /* fallthrough */
                case ERESTARTNOINTR:
                        regs->regs[7] = regs->regs[26];
                        regs->regs[2] = regs->regs[0];
                        regs->cp0_epc -= 4;
                }

                regs->regs[0] = 0;      /* Don't deal with this again.  */
        }

        if (sig_uses_siginfo(ka))
                ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
                                          ka, regs, sig, oldset, info);
        else
                ret = abi->setup_frame(vdso + abi->signal_return_offset,
                                       ka, regs, sig, oldset);

        if (ret)
                return ret;

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
                sigaddset(&current->blocked, sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        return ret;
}

static void do_signal(struct pt_regs *regs)
{
        struct k_sigaction ka;
        sigset_t *oldset;
        siginfo_t info;
        int signr;

        /*
         * We want the common case to go fast, which is why we may in certain
         * cases get here from kernel mode. Just return without doing anything
         * if so.
         */
        if (!user_mode(regs))
                return;

        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                oldset = &current->saved_sigmask;
        else
                oldset = &current->blocked;

        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
                if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
                        /*
                         * A signal was successfully delivered; the saved
                         * sigmask will have been stored in the signal frame,
                         * and will be restored by sigreturn, so we can simply
                         * clear the TIF_RESTORE_SIGMASK flag.
                         */
                        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                                clear_thread_flag(TIF_RESTORE_SIGMASK);
                }

                return;
        }

        if (regs->regs[0]) {
                if (regs->regs[2] == ERESTARTNOHAND ||
                    regs->regs[2] == ERESTARTSYS ||
                    regs->regs[2] == ERESTARTNOINTR) {
                        regs->regs[2] = regs->regs[0];
                        regs->regs[7] = regs->regs[26];
                        regs->cp0_epc -= 4;
                }
                if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
                        regs->regs[2] = current->thread.abi->restart;
                        regs->regs[7] = regs->regs[26];
                        regs->cp0_epc -= 4;
                }
                regs->regs[0] = 0;      /* Don't deal with this again.  */
        }

        /*
         * If there's no signal to deliver, we just put the saved sigmask
         * back
         */
        if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
                clear_thread_flag(TIF_RESTORE_SIGMASK);
                sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
        }
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        __u32 thread_info_flags)
{
        local_irq_enable();

        /* deal with pending signal delivery */
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs);

        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
                if (current->replacement_session_keyring)
                        key_replace_session_keyring();
        }
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
        return raw_cpu_has_fpu
               ? _save_fp_context(sc)
               : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
        return raw_cpu_has_fpu
               ? _restore_fp_context(sc)
               : fpu_emulator_restore_context(sc);
}
#endif

static int signal_setup(void)
{
#ifdef CONFIG_SMP
        /* For now just do the cpu_has_fpu check when the functions are invoked */
        save_fp_context = smp_save_fp_context;
        restore_fp_context = smp_restore_fp_context;
#else
        if (cpu_has_fpu) {
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
                save_fp_context = fpu_emulator_save_context;
                restore_fp_context = fpu_emulator_restore_context;
        }
#endif

        return 0;
}

arch_initcall(signal_setup);