/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cache.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>

#include "signal-common.h"

static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Helper routines
 */
static int protected_save_fp_context(struct sigcontext __user *sc)
{
	int err;
	while (1) {
		lock_fpu_owner();
		own_fpu_inatomic(1);
		err = save_fp_context(sc); /* this might fail */
		unlock_fpu_owner();
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &sc->sc_fpregs[0]) |
			__put_user(0, &sc->sc_fpregs[31]) |
			__put_user(0, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
	return err;
}

static int protected_restore_fp_context(struct sigcontext __user *sc)
{
	int err, tmp __maybe_unused;
	while (1) {
		lock_fpu_owner();
		own_fpu_inatomic(0);
		err = restore_fp_context(sc); /* this might fail */
		unlock_fpu_owner();
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &sc->sc_fpregs[0]) |
			__get_user(tmp, &sc->sc_fpregs[31]) |
			__get_user(tmp, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
	return err;
}

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;
	unsigned int used_math;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	used_math = !!used_math();
	err |= __put_user(used_math, &sc->sc_used_math);

	if (used_math) {
		/*
		 * Save FPU state to signal context. Signal handler
		 * will "inherit" current FPU state.
		 */
		err |= protected_save_fp_context(sc);
	}
	return err;
}
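
/*
 * Check the saved FP control/status word for exceptions that the signal
 * handler left both raised and enabled.  In the MIPS FCSR the Cause bits
 * sit five bits above the corresponding Enable bits, hence the << 5;
 * FPU_CSR_UNI_X (the unimplemented-operation cause) has no enable bit and
 * is checked unconditionally.  Returns SIGFPE if such an exception is
 * pending, 0 if not, or a negative error if the sigcontext could not be
 * accessed.
 */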
int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
	int err, sig;

	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
	if (err > 0)
		err = 0;
	err |= protected_restore_fp_context(sc);
	return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned int used_math;
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	err |= __get_user(used_math, &sc->sc_used_math);
	conditional_used_math(used_math);

	if (used_math) {
		/* restore fpu context if we have used it before */
		if (!err)
			err = check_and_restore_fp_context(sc);
	} else {
		/* signal handler may have used FPU. Give it up. */
		lose_fpu(0);
	}

	return err;
}
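
/*
 * Pick the location of the signal frame on the user stack.  The frame is
 * placed below the current stack pointer (or on the alternate signal stack
 * if SA_ONSTACK asked for one and we are not already on it), leaving a
 * 32-byte gap so a trampoline the FPU emulator may keep just above the
 * user stack is not trashed, and aligned to the ABI mask (or to an I-cache
 * line where ICACHE_REFILLS_WORKAROUND_WAR requires it).  Roughly:
 *
 *	higher addresses
 *	  original $29, emulator trampoline gap (32 bytes)
 *	  struct (rt_)sigframe	<- new $29 passed to the handler
 *	lower addresses
 */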
void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * FPU emulator may have its own trampoline active just
	 * above the user stack, 16-bytes before the next lowest
	 * 16 byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	/* This is the X/Open sanctioned signal stack switching.  */
	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
		sp = current->sas_ss_sp + current->sas_ss_size;

	return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
	sigset_t newset;
	sigset_t __user *uset;

	uset = (sigset_t __user *) regs.regs[4];
	if (copy_from_user(&newset, uset, sizeof(sigset_t)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif

asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
	sigset_t newset;
	sigset_t __user *unewset;
	size_t sigsetsize;

	/* XXX Don't preclude handling different sized sigset_t's.  */
	sigsetsize = regs.regs[5];
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	unewset = (sigset_t __user *) regs.regs[4];
	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
	const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
	stack_t __user *uoss = (stack_t __user *) regs.regs[5];
	unsigned long usp = regs.regs[29];

	return do_sigaltstack(uss, uoss, usp);
}
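
/*
 * sigreturn(2)/rt_sigreturn(2) are reached through the VDSO trampoline once
 * the signal handler returns.  They restore the blocked signal mask and the
 * register state saved in the frame that setup_frame()/setup_rt_frame()
 * pushed (the rt variant also re-applies the saved sigaltstack settings and
 * a pending FP exception becomes a forced SIGFPE), then jump straight to
 * syscall_exit with $29 pointing at the rewritten pt_regs, bypassing the
 * normal syscall return path so the restored registers are not clobbered.
 */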
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
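
/*
 * setup_frame()/setup_rt_frame() build the user-space signal frame for a
 * classic or SA_SIGINFO handler respectively: the register state (plus, for
 * the rt variant, siginfo and a ucontext) is written to the frame returned
 * by get_sigframe(), the handler's argument registers are loaded, and $31
 * is pointed at the VDSO trampoline (sig_return) so that a plain return
 * from the handler lands in sigreturn(2)/rt_sigreturn(2) above.
 */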
#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct k_sigaction *ka,
		       struct pt_regs *regs, int signr, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		goto give_sigsegv;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}
#endif

static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
			  struct pt_regs *regs, int signr, sigset_t *set,
			  siginfo_t *info)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		goto give_sigsegv;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp,
			  &frame->rs_uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[29]),
			  &frame->rs_uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size,
			  &frame->rs_uc.uc_stack.ss_size);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame	= setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart	= __NR_restart_syscall
};
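
/*
 * Deliver one signal: fix up an interrupted system call if necessary,
 * build the signal frame, and block further signals as requested by
 * sa_mask.  regs->regs[0] is non-zero only when we entered from a system
 * call; the syscall entry code is expected to have stashed the original
 * syscall number there and the original $a3 in regs[26], so restarting
 * amounts to putting those back into $v0/$a3 and stepping c0_epc back
 * over the syscall instruction.
 */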
static int handle_signal(unsigned long sig, siginfo_t *info,
	struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
	int ret;
	struct mips_abi *abi = current->thread.abi;
	void *vdso = current->mm->context.vdso;

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
		/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.  */
	}

	if (sig_uses_siginfo(ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ka, regs, sig, oldset, info);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset,
				       ka, regs, sig, oldset);

	if (ret)
		return ret;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}
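
/*
 * Deliver any pending signal for the current task.  When no handler runs,
 * an interrupted system call is restarted here instead: ERESTARTNOHAND,
 * ERESTARTSYS and ERESTARTNOINTR simply re-execute the original syscall,
 * while ERESTART_RESTARTBLOCK is redirected to the ABI's restart_syscall
 * so the restart block set up by the original syscall is used.
 */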
static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	sigset_t *oldset;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which is why we may in certain
	 * cases get here from kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TIF_RESTORE_SIGMASK flag.
			 */
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}

		return;
	}

	if (regs->regs[0]) {
		if (regs->regs[2] == ERESTARTNOHAND ||
		    regs->regs[2] == ERESTARTSYS ||
		    regs->regs[2] == ERESTARTNOINTR) {
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
		}
		if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.  */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	/* deal with pending signal delivery */
	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : fpu_emulator_restore_context(sc);
}
#endif

static int signal_setup(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif

	return 0;
}

arch_initcall(signal_setup);