/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
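/*
 * Note: sig_ignored() is the generation-time check.  An ignored signal is
 * normally dropped before it is ever queued; if the task is ptraced it is
 * queued anyway so that the tracer can observe it.
 */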
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
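/*
 * Note: ignore_signals() is used for kernel threads that must not be
 * disturbed by signals: every action is set to SIG_IGN and anything
 * already queued is flushed.
 */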
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
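/*
 * Note: the signal_wake_up(t, resume) and ptrace_signal_wake_up(t, resume)
 * wrappers in <linux/sched/signal.h> call signal_wake_up_state() with
 * TASK_WAKEKILL or __TASK_TRACED added to the wake mask when @resume is set.
 */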
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
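/*
 * The signal has been queued; pick a thread that should dequeue it and
 * wake that thread up.  A fatal signal that does not dump core
 * short-circuits into a group exit: SIGKILL is posted to every thread.
 */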
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
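/*
 * Queue @sig with @info on @t's private or shared pending list and pick
 * a thread to handle it.  This is the common slow path for all senders;
 * the caller must hold @t's ->siglock.
 */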
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
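/*
 * Look up and lock @p's sighand, then queue the signal on either the
 * private or the shared pending list depending on @group.  Returns
 * -ESRCH if the task's sighand is already gone (task being released).
 */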
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
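/*
 * Pin and lock ->sighand of a task that may be exiting under us.  Returns
 * the locked sighand_struct with interrupts disabled and saved in *flags,
 * or NULL if the task has already passed through __exit_signal().  Most
 * callers use the lock_task_sighand() wrapper.
 */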
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
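/*
 * pid encodings accepted by kill_something_info() below, matching kill(2):
 * pid > 0 signals that process, pid == 0 the caller's process group,
 * pid < -1 the process group -pid, and pid == -1 every process the caller
 * may signal except init and the caller's own thread group.
 */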
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
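/*
 * Example (illustrative): the tty layer uses these helpers for job
 * control, e.g. disassociate_ctty() hangs up a session's foreground
 * group with kill_pgrp(tty_pgrp, SIGHUP, on_exit).
 */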
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
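/*
 * Note: a preallocated sigqueue can be queued at most once, so when
 * send_sigqueue() finds the entry still pending it only bumps
 * si_overrun rather than queueing it a second time.
 */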
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
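/*
 * Note: exit_notify() uses the autoreap result of do_notify_parent()
 * to choose between EXIT_DEAD (self-reap) and EXIT_ZOMBIE for the
 * exiting task.
 */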
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
1984 * 1985 * While ptraced, a task may be resumed while group stop is 1986 * still in effect and then receive a stop signal and 1987 * initiate another group stop. This deviates from the 1988 * usual behavior as two consecutive stop signals can't 1989 * cause two group stops when !ptraced. That is why we 1990 * also check !task_is_stopped(t) below. 1991 * 1992 * The condition can be distinguished by testing whether 1993 * SIGNAL_STOP_STOPPED is already set. Don't generate 1994 * group_exit_code in such case. 1995 * 1996 * This is not necessary for SIGNAL_STOP_CONTINUED because 1997 * an intervening stop signal is required to cause two 1998 * continued events regardless of ptrace. 1999 */ 2000 if (!(sig->flags & SIGNAL_STOP_STOPPED)) 2001 sig->group_exit_code = signr; 2002 2003 sig->group_stop_count = 0; 2004 2005 if (task_set_jobctl_pending(current, signr | gstop)) 2006 sig->group_stop_count++; 2007 2008 t = current; 2009 while_each_thread(current, t) { 2010 /* 2011 * Setting state to TASK_STOPPED for a group 2012 * stop is always done with the siglock held, 2013 * so this check has no races. 2014 */ 2015 if (!task_is_stopped(t) && 2016 task_set_jobctl_pending(t, signr | gstop)) { 2017 sig->group_stop_count++; 2018 if (likely(!(t->ptrace & PT_SEIZED))) 2019 signal_wake_up(t, 0); 2020 else 2021 ptrace_trap_notify(t); 2022 } 2023 } 2024 } 2025 2026 if (likely(!current->ptrace)) { 2027 int notify = 0; 2028 2029 /* 2030 * If there are no other threads in the group, or if there 2031 * is a group stop in progress and we are the last to stop, 2032 * report to the parent. 2033 */ 2034 if (task_participate_group_stop(current)) 2035 notify = CLD_STOPPED; 2036 2037 __set_current_state(TASK_STOPPED); 2038 spin_unlock_irq(&current->sighand->siglock); 2039 2040 /* 2041 * Notify the parent of the group stop completion. Because 2042 * we're not holding either the siglock or tasklist_lock 2043 * here, the ptracer may attach in between; however, this is for 2044 * group stop and should always be delivered to the real 2045 * parent of the group leader. The new ptracer will get 2046 * its notification when this task transitions into 2047 * TASK_TRACED. 2048 */ 2049 if (notify) { 2050 read_lock(&tasklist_lock); 2051 do_notify_parent_cldstop(current, false, notify); 2052 read_unlock(&tasklist_lock); 2053 } 2054 2055 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 2056 freezable_schedule(); 2057 return true; 2058 } else { 2059 /* 2060 * While ptraced, group stop is handled by STOP trap. 2061 * Schedule it and let the caller deal with it. 2062 */ 2063 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); 2064 return false; 2065 } 2066 } 2067 2068 /** 2069 * do_jobctl_trap - take care of ptrace jobctl traps 2070 * 2071 * When PT_SEIZED, it's used for both group stop and explicit 2072 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with 2073 * accompanying siginfo. If stopped, lower eight bits of exit_code contain 2074 * the stop signal; otherwise, %SIGTRAP. 2075 * 2076 * When !PT_SEIZED, it's used only for group stop trap with stop signal 2077 * number as exit_code and no siginfo. 2078 * 2079 * CONTEXT: 2080 * Must be called with @current->sighand->siglock held, which may be 2081 * released and re-acquired before returning with intervening sleep.
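 *
 * Tracer-side sketch (userspace, illustrative only; "pid" is a
 * hypothetical tracee): a PT_SEIZED task trapping here is seen by
 * its tracer roughly as:
 *
 *	ptrace(PTRACE_SEIZE, pid, 0, 0);
 *	ptrace(PTRACE_INTERRUPT, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	if ((status >> 16) == PTRACE_EVENT_STOP)
 *		... INTERRUPT or group-stop trap; WSTOPSIG(status)
 *		... carries the lower eight bits of exit_code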
2082 */ 2083 static void do_jobctl_trap(void) 2084 { 2085 struct signal_struct *signal = current->signal; 2086 int signr = current->jobctl & JOBCTL_STOP_SIGMASK; 2087 2088 if (current->ptrace & PT_SEIZED) { 2089 if (!signal->group_stop_count && 2090 !(signal->flags & SIGNAL_STOP_STOPPED)) 2091 signr = SIGTRAP; 2092 WARN_ON_ONCE(!signr); 2093 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), 2094 CLD_STOPPED); 2095 } else { 2096 WARN_ON_ONCE(!signr); 2097 ptrace_stop(signr, CLD_STOPPED, 0, NULL); 2098 current->exit_code = 0; 2099 } 2100 } 2101 2102 static int ptrace_signal(int signr, siginfo_t *info) 2103 { 2104 /* 2105 * We do not check sig_kernel_stop(signr) but set this marker 2106 * unconditionally because we do not know whether debugger will 2107 * change signr. This flag has no meaning unless we are going 2108 * to stop after return from ptrace_stop(). In this case it will 2109 * be checked in do_signal_stop(), we should only stop if it was 2110 * not cleared by SIGCONT while we were sleeping. See also the 2111 * comment in dequeue_signal(). 2112 */ 2113 current->jobctl |= JOBCTL_STOP_DEQUEUED; 2114 ptrace_stop(signr, CLD_TRAPPED, 0, info); 2115 2116 /* We're back. Did the debugger cancel the sig? */ 2117 signr = current->exit_code; 2118 if (signr == 0) 2119 return signr; 2120 2121 current->exit_code = 0; 2122 2123 /* 2124 * Update the siginfo structure if the signal has 2125 * changed. If the debugger wanted something 2126 * specific in the siginfo structure then it should 2127 * have updated *info via PTRACE_SETSIGINFO. 2128 */ 2129 if (signr != info->si_signo) { 2130 info->si_signo = signr; 2131 info->si_errno = 0; 2132 info->si_code = SI_USER; 2133 rcu_read_lock(); 2134 info->si_pid = task_pid_vnr(current->parent); 2135 info->si_uid = from_kuid_munged(current_user_ns(), 2136 task_uid(current->parent)); 2137 rcu_read_unlock(); 2138 } 2139 2140 /* If the (new) signal is now blocked, requeue it. */ 2141 if (sigismember(&current->blocked, signr)) { 2142 specific_send_sig_info(signr, info, current); 2143 signr = 0; 2144 } 2145 2146 return signr; 2147 } 2148 2149 int get_signal(struct ksignal *ksig) 2150 { 2151 struct sighand_struct *sighand = current->sighand; 2152 struct signal_struct *signal = current->signal; 2153 int signr; 2154 2155 if (unlikely(current->task_works)) 2156 task_work_run(); 2157 2158 if (unlikely(uprobe_deny_signal())) 2159 return 0; 2160 2161 /* 2162 * Do this once, we can't return to user-mode if freezing() == T. 2163 * do_signal_stop() and ptrace_stop() do freezable_schedule() and 2164 * thus do not need another check after return. 2165 */ 2166 try_to_freeze(); 2167 2168 relock: 2169 spin_lock_irq(&sighand->siglock); 2170 /* 2171 * Every stopped thread goes here after wakeup. Check to see if 2172 * we should notify the parent, prepare_signal(SIGCONT) encodes 2173 * the CLD_ si_code into SIGNAL_CLD_MASK bits. 2174 */ 2175 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { 2176 int why; 2177 2178 if (signal->flags & SIGNAL_CLD_CONTINUED) 2179 why = CLD_CONTINUED; 2180 else 2181 why = CLD_STOPPED; 2182 2183 signal->flags &= ~SIGNAL_CLD_MASK; 2184 2185 spin_unlock_irq(&sighand->siglock); 2186 2187 /* 2188 * Notify the parent that we're continuing. This event is 2189 * always per-process and doesn't make a whole lot of sense 2190 * for ptracers, who shouldn't consume the state via 2191 * wait(2) either, but, for backward compatibility, notify 2192 * the ptracer of the group leader too unless it's gonna be 2193 * a duplicate.
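 *
 * Parent-side sketch (userspace, illustrative only; "child" is a
 * hypothetical stopped task): the notification sent below is what
 * makes the continuation visible:
 *
 *	kill(child, SIGCONT);
 *	waitpid(child, &status, WCONTINUED);
 *	if (WIFCONTINUED(status))
 *		... corresponds to the CLD_CONTINUED case above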
2194 */ 2195 read_lock(&tasklist_lock); 2196 do_notify_parent_cldstop(current, false, why); 2197 2198 if (ptrace_reparented(current->group_leader)) 2199 do_notify_parent_cldstop(current->group_leader, 2200 true, why); 2201 read_unlock(&tasklist_lock); 2202 2203 goto relock; 2204 } 2205 2206 for (;;) { 2207 struct k_sigaction *ka; 2208 2209 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && 2210 do_signal_stop(0)) 2211 goto relock; 2212 2213 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { 2214 do_jobctl_trap(); 2215 spin_unlock_irq(&sighand->siglock); 2216 goto relock; 2217 } 2218 2219 signr = dequeue_signal(current, &current->blocked, &ksig->info); 2220 2221 if (!signr) 2222 break; /* will return 0 */ 2223 2224 if (unlikely(current->ptrace) && signr != SIGKILL) { 2225 signr = ptrace_signal(signr, &ksig->info); 2226 if (!signr) 2227 continue; 2228 } 2229 2230 ka = &sighand->action[signr-1]; 2231 2232 /* Trace actually delivered signals. */ 2233 trace_signal_deliver(signr, &ksig->info, ka); 2234 2235 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 2236 continue; 2237 if (ka->sa.sa_handler != SIG_DFL) { 2238 /* Run the handler. */ 2239 ksig->ka = *ka; 2240 2241 if (ka->sa.sa_flags & SA_ONESHOT) 2242 ka->sa.sa_handler = SIG_DFL; 2243 2244 break; /* will return non-zero "signr" value */ 2245 } 2246 2247 /* 2248 * Now we are doing the default action for this signal. 2249 */ 2250 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 2251 continue; 2252 2253 /* 2254 * Global init gets no signals it doesn't want. 2255 * Container-init gets no signals it doesn't want from same 2256 * container. 2257 * 2258 * Note that if global/container-init sees a sig_kernel_only() 2259 * signal here, the signal must have been generated internally 2260 * or must have come from an ancestor namespace. In either 2261 * case, the signal cannot be dropped. 2262 */ 2263 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && 2264 !sig_kernel_only(signr)) 2265 continue; 2266 2267 if (sig_kernel_stop(signr)) { 2268 /* 2269 * The default action is to stop all threads in 2270 * the thread group. The job control signals 2271 * do nothing in an orphaned pgrp, but SIGSTOP 2272 * always works. Note that siglock needs to be 2273 * dropped during the call to is_orphaned_pgrp() 2274 * because of lock ordering with tasklist_lock. 2275 * This allows an intervening SIGCONT to be posted. 2276 * We need to check for that and bail out if necessary. 2277 */ 2278 if (signr != SIGSTOP) { 2279 spin_unlock_irq(&sighand->siglock); 2280 2281 /* signals can be posted during this window */ 2282 2283 if (is_current_pgrp_orphaned()) 2284 goto relock; 2285 2286 spin_lock_irq(&sighand->siglock); 2287 } 2288 2289 if (likely(do_signal_stop(ksig->info.si_signo))) { 2290 /* It released the siglock. */ 2291 goto relock; 2292 } 2293 2294 /* 2295 * We didn't actually stop, due to a race 2296 * with SIGCONT or something like that. 2297 */ 2298 continue; 2299 } 2300 2301 spin_unlock_irq(&sighand->siglock); 2302 2303 /* 2304 * Anything else is fatal, maybe with a core dump. 2305 */ 2306 current->flags |= PF_SIGNALED; 2307 2308 if (sig_kernel_coredump(signr)) { 2309 if (print_fatal_signals) 2310 print_fatal_signal(ksig->info.si_signo); 2311 proc_coredump_connector(current); 2312 /* 2313 * If it was able to dump core, this kills all 2314 * other threads in the group and synchronizes with 2315 * their demise.
If we lost the race with another 2316 * thread getting here, it set group_exit_code 2317 * first and our do_group_exit call below will use 2318 * that value and ignore the one we pass it. 2319 */ 2320 do_coredump(&ksig->info); 2321 } 2322 2323 /* 2324 * Death signals, no core dump. 2325 */ 2326 do_group_exit(ksig->info.si_signo); 2327 /* NOTREACHED */ 2328 } 2329 spin_unlock_irq(&sighand->siglock); 2330 2331 ksig->sig = signr; 2332 return ksig->sig > 0; 2333 } 2334 2335 /** 2336 * signal_delivered - called after a signal has been delivered 2337 * @ksig: kernel signal struct 2338 * @stepping: nonzero if debugger single-step or block-step in use 2339 * 2340 * This function should be called when a signal has successfully been 2341 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask 2342 * is always blocked, and the signal itself is blocked unless %SA_NODEFER 2343 * is set in @ksig->ka.sa.sa_flags). Tracing is notified. 2344 */ 2345 static void signal_delivered(struct ksignal *ksig, int stepping) 2346 { 2347 sigset_t blocked; 2348 2349 /* A signal was successfully delivered, and the 2350 saved sigmask was stored on the signal frame, 2351 and will be restored by sigreturn. So we can 2352 simply clear the restore sigmask flag. */ 2353 clear_restore_sigmask(); 2354 2355 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask); 2356 if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) 2357 sigaddset(&blocked, ksig->sig); 2358 set_current_blocked(&blocked); 2359 tracehook_signal_handler(stepping); 2360 } 2361 2362 void signal_setup_done(int failed, struct ksignal *ksig, int stepping) 2363 { 2364 if (failed) 2365 force_sigsegv(ksig->sig, current); 2366 else 2367 signal_delivered(ksig, stepping); 2368 } 2369 2370 /* 2371 * It could be that complete_signal() picked us to notify about the 2372 * group-wide signal. Other threads should be notified now to take 2373 * the shared signals in @which since we will not. 2374 */ 2375 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) 2376 { 2377 sigset_t retarget; 2378 struct task_struct *t; 2379 2380 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); 2381 if (sigisemptyset(&retarget)) 2382 return; 2383 2384 t = tsk; 2385 while_each_thread(tsk, t) { 2386 if (t->flags & PF_EXITING) 2387 continue; 2388 2389 if (!has_pending_signals(&retarget, &t->blocked)) 2390 continue; 2391 /* Remove the signals this thread can handle. */ 2392 sigandsets(&retarget, &retarget, &t->blocked); 2393 2394 if (!signal_pending(t)) 2395 signal_wake_up(t, 0); 2396 2397 if (sigisemptyset(&retarget)) 2398 break; 2399 } 2400 } 2401 2402 void exit_signals(struct task_struct *tsk) 2403 { 2404 int group_stop = 0; 2405 sigset_t unblocked; 2406 2407 /* 2408 * @tsk is about to have PF_EXITING set - lock out users which 2409 * expect stable threadgroup. 2410 */ 2411 cgroup_threadgroup_change_begin(tsk); 2412 2413 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { 2414 tsk->flags |= PF_EXITING; 2415 cgroup_threadgroup_change_end(tsk); 2416 return; 2417 } 2418 2419 spin_lock_irq(&tsk->sighand->siglock); 2420 /* 2421 * From now this task is not visible for group-wide signals, 2422 * see wants_signal(), do_signal_stop().
2423 */ 2424 tsk->flags |= PF_EXITING; 2425 2426 cgroup_threadgroup_change_end(tsk); 2427 2428 if (!signal_pending(tsk)) 2429 goto out; 2430 2431 unblocked = tsk->blocked; 2432 signotset(&unblocked); 2433 retarget_shared_pending(tsk, &unblocked); 2434 2435 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && 2436 task_participate_group_stop(tsk)) 2437 group_stop = CLD_STOPPED; 2438 out: 2439 spin_unlock_irq(&tsk->sighand->siglock); 2440 2441 /* 2442 * If group stop has completed, deliver the notification. This 2443 * should always go to the real parent of the group leader. 2444 */ 2445 if (unlikely(group_stop)) { 2446 read_lock(&tasklist_lock); 2447 do_notify_parent_cldstop(tsk, false, group_stop); 2448 read_unlock(&tasklist_lock); 2449 } 2450 } 2451 2452 EXPORT_SYMBOL(recalc_sigpending); 2453 EXPORT_SYMBOL_GPL(dequeue_signal); 2454 EXPORT_SYMBOL(flush_signals); 2455 EXPORT_SYMBOL(force_sig); 2456 EXPORT_SYMBOL(send_sig); 2457 EXPORT_SYMBOL(send_sig_info); 2458 EXPORT_SYMBOL(sigprocmask); 2459 2460 /* 2461 * System call entry points. 2462 */ 2463 2464 /** 2465 * sys_restart_syscall - restart a system call 2466 */ 2467 SYSCALL_DEFINE0(restart_syscall) 2468 { 2469 struct restart_block *restart = &current->restart_block; 2470 return restart->fn(restart); 2471 } 2472 2473 long do_no_restart_syscall(struct restart_block *param) 2474 { 2475 return -EINTR; 2476 } 2477 2478 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) 2479 { 2480 if (signal_pending(tsk) && !thread_group_empty(tsk)) { 2481 sigset_t newblocked; 2482 /* A set of now blocked but previously unblocked signals. */ 2483 sigandnsets(&newblocked, newset, &current->blocked); 2484 retarget_shared_pending(tsk, &newblocked); 2485 } 2486 tsk->blocked = *newset; 2487 recalc_sigpending(); 2488 } 2489 2490 /** 2491 * set_current_blocked - change current->blocked mask 2492 * @newset: new mask 2493 * 2494 * It is wrong to change ->blocked directly, this helper should be used 2495 * to ensure the process can't miss a shared signal we are going to block. 2496 */ 2497 void set_current_blocked(sigset_t *newset) 2498 { 2499 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); 2500 __set_current_blocked(newset); 2501 } 2502 2503 void __set_current_blocked(const sigset_t *newset) 2504 { 2505 struct task_struct *tsk = current; 2506 2507 /* 2508 * In case the signal mask hasn't changed, there is nothing we need 2509 * to do. The current->blocked shouldn't be modified by another task. 2510 */ 2511 if (sigequalsets(&tsk->blocked, newset)) 2512 return; 2513 2514 spin_lock_irq(&tsk->sighand->siglock); 2515 __set_task_blocked(tsk, newset); 2516 spin_unlock_irq(&tsk->sighand->siglock); 2517 } 2518 2519 /* 2520 * This is also useful for kernel threads that want to temporarily 2521 * (or permanently) block certain signals. 2522 * 2523 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel 2524 * interface happily blocks "unblockable" signals like SIGKILL 2525 * and friends.
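 *
 * For contrast, the usual user-mode pattern (illustrative only,
 * reaching sys_rt_sigprocmask() below through the libc wrapper):
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	... SIGINT now blocked
 *	sigprocmask(SIG_SETMASK, &old, NULL);	... restore old mask
 *
 * where any attempt to block SIGKILL or SIGSTOP is silently dropped,
 * see set_current_blocked() above.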
2526 */ 2527 int sigprocmask(int how, sigset_t *set, sigset_t *oldset) 2528 { 2529 struct task_struct *tsk = current; 2530 sigset_t newset; 2531 2532 /* Lockless, only current can change ->blocked, never from irq */ 2533 if (oldset) 2534 *oldset = tsk->blocked; 2535 2536 switch (how) { 2537 case SIG_BLOCK: 2538 sigorsets(&newset, &tsk->blocked, set); 2539 break; 2540 case SIG_UNBLOCK: 2541 sigandnsets(&newset, &tsk->blocked, set); 2542 break; 2543 case SIG_SETMASK: 2544 newset = *set; 2545 break; 2546 default: 2547 return -EINVAL; 2548 } 2549 2550 __set_current_blocked(&newset); 2551 return 0; 2552 } 2553 2554 /** 2555 * sys_rt_sigprocmask - change the list of currently blocked signals 2556 * @how: whether to add, remove, or set signals 2557 * @nset: new set of signals to block, unblock, or set (if non-null) 2558 * @oset: previous value of signal mask if non-null 2559 * @sigsetsize: size of sigset_t type 2560 */ 2561 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, 2562 sigset_t __user *, oset, size_t, sigsetsize) 2563 { 2564 sigset_t old_set, new_set; 2565 int error; 2566 2567 /* XXX: Don't preclude handling different sized sigset_t's. */ 2568 if (sigsetsize != sizeof(sigset_t)) 2569 return -EINVAL; 2570 2571 old_set = current->blocked; 2572 2573 if (nset) { 2574 if (copy_from_user(&new_set, nset, sizeof(sigset_t))) 2575 return -EFAULT; 2576 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2577 2578 error = sigprocmask(how, &new_set, NULL); 2579 if (error) 2580 return error; 2581 } 2582 2583 if (oset) { 2584 if (copy_to_user(oset, &old_set, sizeof(sigset_t))) 2585 return -EFAULT; 2586 } 2587 2588 return 0; 2589 } 2590 2591 #ifdef CONFIG_COMPAT 2592 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, 2593 compat_sigset_t __user *, oset, compat_size_t, sigsetsize) 2594 { 2595 #ifdef __BIG_ENDIAN 2596 sigset_t old_set = current->blocked; 2597 2598 /* XXX: Don't preclude handling different sized sigset_t's. */ 2599 if (sigsetsize != sizeof(sigset_t)) 2600 return -EINVAL; 2601 2602 if (nset) { 2603 compat_sigset_t new32; 2604 sigset_t new_set; 2605 int error; 2606 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t))) 2607 return -EFAULT; 2608 2609 sigset_from_compat(&new_set, &new32); 2610 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2611 2612 error = sigprocmask(how, &new_set, NULL); 2613 if (error) 2614 return error; 2615 } 2616 if (oset) { 2617 compat_sigset_t old32; 2618 sigset_to_compat(&old32, &old_set); 2619 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t))) 2620 return -EFAULT; 2621 } 2622 return 0; 2623 #else 2624 return sys_rt_sigprocmask(how, (sigset_t __user *)nset, 2625 (sigset_t __user *)oset, sigsetsize); 2626 #endif 2627 } 2628 #endif 2629 2630 static int do_sigpending(void *set, unsigned long sigsetsize) 2631 { 2632 if (sigsetsize > sizeof(sigset_t)) 2633 return -EINVAL; 2634 2635 spin_lock_irq(&current->sighand->siglock); 2636 sigorsets(set, &current->pending.signal, 2637 &current->signal->shared_pending.signal); 2638 spin_unlock_irq(&current->sighand->siglock); 2639 2640 /* Outside the lock because only this thread touches it.
*/ 2641 sigandsets(set, &current->blocked, set); 2642 return 0; 2643 } 2644 2645 /** 2646 * sys_rt_sigpending - examine pending signals that have been raised 2647 * while blocked 2648 * @uset: stores pending signals 2649 * @sigsetsize: size of sigset_t type or smaller 2650 */ 2651 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) 2652 { 2653 sigset_t set; 2654 int err = do_sigpending(&set, sigsetsize); 2655 if (!err && copy_to_user(uset, &set, sigsetsize)) 2656 err = -EFAULT; 2657 return err; 2658 } 2659 2660 #ifdef CONFIG_COMPAT 2661 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, 2662 compat_size_t, sigsetsize) 2663 { 2664 #ifdef __BIG_ENDIAN 2665 sigset_t set; 2666 int err = do_sigpending(&set, sigsetsize); 2667 if (!err) { 2668 compat_sigset_t set32; 2669 sigset_to_compat(&set32, &set); 2670 /* we can get here only if sigsetsize <= sizeof(set) */ 2671 if (copy_to_user(uset, &set32, sigsetsize)) 2672 err = -EFAULT; 2673 } 2674 return err; 2675 #else 2676 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize); 2677 #endif 2678 } 2679 #endif 2680 2681 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER 2682 2683 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) 2684 { 2685 int err; 2686 2687 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) 2688 return -EFAULT; 2689 if (from->si_code < 0) 2690 return __copy_to_user(to, from, sizeof(siginfo_t)) 2691 ? -EFAULT : 0; 2692 /* 2693 * If you change siginfo_t structure, please be sure 2694 * this code is fixed accordingly. 2695 * Please remember to update the signalfd_copyinfo() function 2696 * inside fs/signalfd.c too, in case siginfo_t changes. 2697 * It should never copy any pad contained in the structure 2698 * to avoid security leaks, but must copy the generic 2699 * 3 ints plus the relevant union member. 2700 */ 2701 err = __put_user(from->si_signo, &to->si_signo); 2702 err |= __put_user(from->si_errno, &to->si_errno); 2703 err |= __put_user((short)from->si_code, &to->si_code); 2704 switch (from->si_code & __SI_MASK) { 2705 case __SI_KILL: 2706 err |= __put_user(from->si_pid, &to->si_pid); 2707 err |= __put_user(from->si_uid, &to->si_uid); 2708 break; 2709 case __SI_TIMER: 2710 err |= __put_user(from->si_tid, &to->si_tid); 2711 err |= __put_user(from->si_overrun, &to->si_overrun); 2712 err |= __put_user(from->si_ptr, &to->si_ptr); 2713 break; 2714 case __SI_POLL: 2715 err |= __put_user(from->si_band, &to->si_band); 2716 err |= __put_user(from->si_fd, &to->si_fd); 2717 break; 2718 case __SI_FAULT: 2719 err |= __put_user(from->si_addr, &to->si_addr); 2720 #ifdef __ARCH_SI_TRAPNO 2721 err |= __put_user(from->si_trapno, &to->si_trapno); 2722 #endif 2723 #ifdef BUS_MCEERR_AO 2724 /* 2725 * Other callers might not initialize the si_lsb field, 2726 * so check explicitly for the right codes here.
2727 */ 2728 if (from->si_signo == SIGBUS && 2729 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) 2730 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 2731 #endif 2732 #ifdef SEGV_BNDERR 2733 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) { 2734 err |= __put_user(from->si_lower, &to->si_lower); 2735 err |= __put_user(from->si_upper, &to->si_upper); 2736 } 2737 #endif 2738 #ifdef SEGV_PKUERR 2739 if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR) 2740 err |= __put_user(from->si_pkey, &to->si_pkey); 2741 #endif 2742 break; 2743 case __SI_CHLD: 2744 err |= __put_user(from->si_pid, &to->si_pid); 2745 err |= __put_user(from->si_uid, &to->si_uid); 2746 err |= __put_user(from->si_status, &to->si_status); 2747 err |= __put_user(from->si_utime, &to->si_utime); 2748 err |= __put_user(from->si_stime, &to->si_stime); 2749 break; 2750 case __SI_RT: /* This is not generated by the kernel as of now. */ 2751 case __SI_MESGQ: /* But this is */ 2752 err |= __put_user(from->si_pid, &to->si_pid); 2753 err |= __put_user(from->si_uid, &to->si_uid); 2754 err |= __put_user(from->si_ptr, &to->si_ptr); 2755 break; 2756 #ifdef __ARCH_SIGSYS 2757 case __SI_SYS: 2758 err |= __put_user(from->si_call_addr, &to->si_call_addr); 2759 err |= __put_user(from->si_syscall, &to->si_syscall); 2760 err |= __put_user(from->si_arch, &to->si_arch); 2761 break; 2762 #endif 2763 default: /* this is just in case for now ... */ 2764 err |= __put_user(from->si_pid, &to->si_pid); 2765 err |= __put_user(from->si_uid, &to->si_uid); 2766 break; 2767 } 2768 return err; 2769 } 2770 2771 #endif 2772 2773 /** 2774 * do_sigtimedwait - wait for queued signals specified in @which 2775 * @which: queued signals to wait for 2776 * @info: if non-null, the signal's siginfo is returned here 2777 * @ts: upper bound on process time suspension 2778 */ 2779 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info, 2780 const struct timespec *ts) 2781 { 2782 ktime_t *to = NULL, timeout = KTIME_MAX; 2783 struct task_struct *tsk = current; 2784 sigset_t mask = *which; 2785 int sig, ret = 0; 2786 2787 if (ts) { 2788 if (!timespec_valid(ts)) 2789 return -EINVAL; 2790 timeout = timespec_to_ktime(*ts); 2791 to = &timeout; 2792 } 2793 2794 /* 2795 * Invert the set of allowed signals to get those we want to block. 2796 */ 2797 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); 2798 signotset(&mask); 2799 2800 spin_lock_irq(&tsk->sighand->siglock); 2801 sig = dequeue_signal(tsk, &mask, info); 2802 if (!sig && timeout) { 2803 /* 2804 * None ready, temporarily unblock those we're interested in 2805 * while we are sleeping, so that we'll be awakened when 2806 * they arrive. Unblocking is always fine, we can avoid 2807 * set_current_blocked(). 2808 */ 2809 tsk->real_blocked = tsk->blocked; 2810 sigandsets(&tsk->blocked, &tsk->blocked, &mask); 2811 recalc_sigpending(); 2812 spin_unlock_irq(&tsk->sighand->siglock); 2813 2814 __set_current_state(TASK_INTERRUPTIBLE); 2815 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, 2816 HRTIMER_MODE_REL); 2817 spin_lock_irq(&tsk->sighand->siglock); 2818 __set_task_blocked(tsk, &tsk->real_blocked); 2819 sigemptyset(&tsk->real_blocked); 2820 sig = dequeue_signal(tsk, &mask, info); 2821 } 2822 spin_unlock_irq(&tsk->sighand->siglock); 2823 2824 if (sig) 2825 return sig; 2826 return ret ?
-EINTR : -EAGAIN; 2827 } 2828 2829 /** 2830 * sys_rt_sigtimedwait - synchronously wait for queued signals specified 2831 * in @uthese 2832 * @uthese: queued signals to wait for 2833 * @uinfo: if non-null, the signal's siginfo is returned here 2834 * @uts: upper bound on process time suspension 2835 * @sigsetsize: size of sigset_t type 2836 */ 2837 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, 2838 siginfo_t __user *, uinfo, const struct timespec __user *, uts, 2839 size_t, sigsetsize) 2840 { 2841 sigset_t these; 2842 struct timespec ts; 2843 siginfo_t info; 2844 int ret; 2845 2846 /* XXX: Don't preclude handling different sized sigset_t's. */ 2847 if (sigsetsize != sizeof(sigset_t)) 2848 return -EINVAL; 2849 2850 if (copy_from_user(&these, uthese, sizeof(these))) 2851 return -EFAULT; 2852 2853 if (uts) { 2854 if (copy_from_user(&ts, uts, sizeof(ts))) 2855 return -EFAULT; 2856 } 2857 2858 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); 2859 2860 if (ret > 0 && uinfo) { 2861 if (copy_siginfo_to_user(uinfo, &info)) 2862 ret = -EFAULT; 2863 } 2864 2865 return ret; 2866 } 2867 2868 #ifdef CONFIG_COMPAT 2869 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, 2870 struct compat_siginfo __user *, uinfo, 2871 struct compat_timespec __user *, uts, compat_size_t, sigsetsize) 2872 { 2873 compat_sigset_t s32; 2874 sigset_t s; 2875 struct timespec t; 2876 siginfo_t info; 2877 long ret; 2878 2879 if (sigsetsize != sizeof(sigset_t)) 2880 return -EINVAL; 2881 2882 if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t))) 2883 return -EFAULT; 2884 sigset_from_compat(&s, &s32); 2885 2886 if (uts) { 2887 if (compat_get_timespec(&t, uts)) 2888 return -EFAULT; 2889 } 2890 2891 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); 2892 2893 if (ret > 0 && uinfo) { 2894 if (copy_siginfo_to_user32(uinfo, &info)) 2895 ret = -EFAULT; 2896 } 2897 2898 return ret; 2899 } 2900 #endif 2901 2902 /** 2903 * sys_kill - send a signal to a process 2904 * @pid: the PID of the process 2905 * @sig: signal to be sent 2906 */ 2907 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) 2908 { 2909 struct siginfo info; 2910 2911 info.si_signo = sig; 2912 info.si_errno = 0; 2913 info.si_code = SI_USER; 2914 info.si_pid = task_tgid_vnr(current); 2915 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 2916 2917 return kill_something_info(sig, &info, pid); 2918 } 2919 2920 static int 2921 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) 2922 { 2923 struct task_struct *p; 2924 int error = -ESRCH; 2925 2926 rcu_read_lock(); 2927 p = find_task_by_vpid(pid); 2928 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { 2929 error = check_kill_permission(sig, info, p); 2930 /* 2931 * The null signal is a permissions and process existence 2932 * probe. No signal is actually delivered. 2933 */ 2934 if (!error && sig) { 2935 error = do_send_sig_info(sig, info, p, false); 2936 /* 2937 * If lock_task_sighand() failed we pretend the task 2938 * dies after receiving the signal. The window is tiny, 2939 * and the signal is private anyway. 
2940 */ 2941 if (unlikely(error == -ESRCH)) 2942 error = 0; 2943 } 2944 } 2945 rcu_read_unlock(); 2946 2947 return error; 2948 } 2949 2950 static int do_tkill(pid_t tgid, pid_t pid, int sig) 2951 { 2952 struct siginfo info = {}; 2953 2954 info.si_signo = sig; 2955 info.si_errno = 0; 2956 info.si_code = SI_TKILL; 2957 info.si_pid = task_tgid_vnr(current); 2958 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 2959 2960 return do_send_specific(tgid, pid, sig, &info); 2961 } 2962 2963 /** 2964 * sys_tgkill - send signal to one specific thread 2965 * @tgid: the thread group ID of the thread 2966 * @pid: the PID of the thread 2967 * @sig: signal to be sent 2968 * 2969 * This syscall also checks the @tgid and returns -ESRCH even if the PID 2970 * exists but no longer belongs to the target process. This 2971 * method solves the problem of threads exiting and PIDs getting reused. 2972 */ 2973 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) 2974 { 2975 /* This is only valid for single tasks */ 2976 if (pid <= 0 || tgid <= 0) 2977 return -EINVAL; 2978 2979 return do_tkill(tgid, pid, sig); 2980 } 2981 2982 /** 2983 * sys_tkill - send signal to one specific task 2984 * @pid: the PID of the task 2985 * @sig: signal to be sent 2986 * 2987 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2988 */ 2989 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) 2990 { 2991 /* This is only valid for single tasks */ 2992 if (pid <= 0) 2993 return -EINVAL; 2994 2995 return do_tkill(0, pid, sig); 2996 } 2997 2998 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) 2999 { 3000 /* Not even root can pretend to send signals from the kernel. 3001 * Nor can they impersonate a kill()/tgkill(), which adds source info. 3002 */ 3003 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 3004 (task_pid_vnr(current) != pid)) 3005 return -EPERM; 3006 3007 info->si_signo = sig; 3008 3009 /* POSIX.1b doesn't mention process groups. */ 3010 return kill_proc_info(sig, info, pid); 3011 } 3012 3013 /** 3014 * sys_rt_sigqueueinfo - send signal information to a process 3015 * @pid: the PID of the thread 3016 * @sig: signal to be sent 3017 * @uinfo: signal info to be sent 3018 */ 3019 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, 3020 siginfo_t __user *, uinfo) 3021 { 3022 siginfo_t info; 3023 if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) 3024 return -EFAULT; 3025 return do_rt_sigqueueinfo(pid, sig, &info); 3026 } 3027 3028 #ifdef CONFIG_COMPAT 3029 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, 3030 compat_pid_t, pid, 3031 int, sig, 3032 struct compat_siginfo __user *, uinfo) 3033 { 3034 siginfo_t info = {}; 3035 int ret = copy_siginfo_from_user32(&info, uinfo); 3036 if (unlikely(ret)) 3037 return ret; 3038 return do_rt_sigqueueinfo(pid, sig, &info); 3039 } 3040 #endif 3041 3042 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) 3043 { 3044 /* This is only valid for single tasks */ 3045 if (pid <= 0 || tgid <= 0) 3046 return -EINVAL; 3047 3048 /* Not even root can pretend to send signals from the kernel. 3049 * Nor can they impersonate a kill()/tgkill(), which adds source info.
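 *
 * Userspace reaches this path legitimately with a negative si_code,
 * e.g. via glibc's pthread_sigqueue(3), which queues SI_QUEUE
 * siginfo to one thread (illustrative only; "thread" and the value
 * are hypothetical):
 *
 *	union sigval v = { .sival_int = 42 };
 *	pthread_sigqueue(thread, SIGUSR1, v);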
3050 */ 3051 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 3052 (task_pid_vnr(current) != pid)) 3053 return -EPERM; 3054 3055 info->si_signo = sig; 3056 3057 return do_send_specific(tgid, pid, sig, info); 3058 } 3059 3060 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, 3061 siginfo_t __user *, uinfo) 3062 { 3063 siginfo_t info; 3064 3065 if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) 3066 return -EFAULT; 3067 3068 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 3069 } 3070 3071 #ifdef CONFIG_COMPAT 3072 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, 3073 compat_pid_t, tgid, 3074 compat_pid_t, pid, 3075 int, sig, 3076 struct compat_siginfo __user *, uinfo) 3077 { 3078 siginfo_t info = {}; 3079 3080 if (copy_siginfo_from_user32(&info, uinfo)) 3081 return -EFAULT; 3082 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 3083 } 3084 #endif 3085 3086 /* 3087 * For kthreads only, must not be used if cloned with CLONE_SIGHAND 3088 */ 3089 void kernel_sigaction(int sig, __sighandler_t action) 3090 { 3091 spin_lock_irq(&current->sighand->siglock); 3092 current->sighand->action[sig - 1].sa.sa_handler = action; 3093 if (action == SIG_IGN) { 3094 sigset_t mask; 3095 3096 sigemptyset(&mask); 3097 sigaddset(&mask, sig); 3098 3099 flush_sigqueue_mask(&mask, &current->signal->shared_pending); 3100 flush_sigqueue_mask(&mask, &current->pending); 3101 recalc_sigpending(); 3102 } 3103 spin_unlock_irq(&current->sighand->siglock); 3104 } 3105 EXPORT_SYMBOL(kernel_sigaction); 3106 3107 void __weak sigaction_compat_abi(struct k_sigaction *act, 3108 struct k_sigaction *oact) 3109 { 3110 } 3111 3112 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) 3113 { 3114 struct task_struct *p = current, *t; 3115 struct k_sigaction *k; 3116 sigset_t mask; 3117 3118 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) 3119 return -EINVAL; 3120 3121 k = &p->sighand->action[sig-1]; 3122 3123 spin_lock_irq(&p->sighand->siglock); 3124 if (oact) 3125 *oact = *k; 3126 3127 sigaction_compat_abi(act, oact); 3128 3129 if (act) { 3130 sigdelsetmask(&act->sa.sa_mask, 3131 sigmask(SIGKILL) | sigmask(SIGSTOP)); 3132 *k = *act; 3133 /* 3134 * POSIX 3.3.1.3: 3135 * "Setting a signal action to SIG_IGN for a signal that is 3136 * pending shall cause the pending signal to be discarded, 3137 * whether or not it is blocked."
3138 * 3139 * "Setting a signal action to SIG_DFL for a signal that is 3140 * pending and whose default action is to ignore the signal 3141 * (for example, SIGCHLD), shall cause the pending signal to 3142 * be discarded, whether or not it is blocked" 3143 */ 3144 if (sig_handler_ignored(sig_handler(p, sig), sig)) { 3145 sigemptyset(&mask); 3146 sigaddset(&mask, sig); 3147 flush_sigqueue_mask(&mask, &p->signal->shared_pending); 3148 for_each_thread(p, t) 3149 flush_sigqueue_mask(&mask, &t->pending); 3150 } 3151 } 3152 3153 spin_unlock_irq(&p->sighand->siglock); 3154 return 0; 3155 } 3156 3157 static int 3158 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp) 3159 { 3160 struct task_struct *t = current; 3161 3162 if (oss) { 3163 memset(oss, 0, sizeof(stack_t)); 3164 oss->ss_sp = (void __user *) t->sas_ss_sp; 3165 oss->ss_size = t->sas_ss_size; 3166 oss->ss_flags = sas_ss_flags(sp) | 3167 (current->sas_ss_flags & SS_FLAG_BITS); 3168 } 3169 3170 if (ss) { 3171 void __user *ss_sp = ss->ss_sp; 3172 size_t ss_size = ss->ss_size; 3173 unsigned ss_flags = ss->ss_flags; 3174 int ss_mode; 3175 3176 if (unlikely(on_sig_stack(sp))) 3177 return -EPERM; 3178 3179 ss_mode = ss_flags & ~SS_FLAG_BITS; 3180 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && 3181 ss_mode != 0)) 3182 return -EINVAL; 3183 3184 if (ss_mode == SS_DISABLE) { 3185 ss_size = 0; 3186 ss_sp = NULL; 3187 } else { 3188 if (unlikely(ss_size < MINSIGSTKSZ)) 3189 return -ENOMEM; 3190 } 3191 3192 t->sas_ss_sp = (unsigned long) ss_sp; 3193 t->sas_ss_size = ss_size; 3194 t->sas_ss_flags = ss_flags; 3195 } 3196 return 0; 3197 } 3198 3199 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) 3200 { 3201 stack_t new, old; 3202 int err; 3203 if (uss && copy_from_user(&new, uss, sizeof(stack_t))) 3204 return -EFAULT; 3205 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL, 3206 current_user_stack_pointer()); 3207 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) 3208 err = -EFAULT; 3209 return err; 3210 } 3211 3212 int restore_altstack(const stack_t __user *uss) 3213 { 3214 stack_t new; 3215 if (copy_from_user(&new, uss, sizeof(stack_t))) 3216 return -EFAULT; 3217 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer()); 3218 /* squash all but EFAULT for now */ 3219 return 0; 3220 } 3221 3222 int __save_altstack(stack_t __user *uss, unsigned long sp) 3223 { 3224 struct task_struct *t = current; 3225 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | 3226 __put_user(t->sas_ss_flags, &uss->ss_flags) | 3227 __put_user(t->sas_ss_size, &uss->ss_size); 3228 if (err) 3229 return err; 3230 if (t->sas_ss_flags & SS_AUTODISARM) 3231 sas_ss_reset(t); 3232 return 0; 3233 } 3234 3235 #ifdef CONFIG_COMPAT 3236 COMPAT_SYSCALL_DEFINE2(sigaltstack, 3237 const compat_stack_t __user *, uss_ptr, 3238 compat_stack_t __user *, uoss_ptr) 3239 { 3240 stack_t uss, uoss; 3241 int ret; 3242 3243 if (uss_ptr) { 3244 compat_stack_t uss32; 3245 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) 3246 return -EFAULT; 3247 uss.ss_sp = compat_ptr(uss32.ss_sp); 3248 uss.ss_flags = uss32.ss_flags; 3249 uss.ss_size = uss32.ss_size; 3250 } 3251 ret = do_sigaltstack(uss_ptr ? 
&uss : NULL, &uoss, 3252 compat_user_stack_pointer()); 3253 if (ret >= 0 && uoss_ptr) { 3254 compat_stack_t old; 3255 memset(&old, 0, sizeof(old)); 3256 old.ss_sp = ptr_to_compat(uoss.ss_sp); 3257 old.ss_flags = uoss.ss_flags; 3258 old.ss_size = uoss.ss_size; 3259 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t))) 3260 ret = -EFAULT; 3261 } 3262 return ret; 3263 } 3264 3265 int compat_restore_altstack(const compat_stack_t __user *uss) 3266 { 3267 int err = compat_sys_sigaltstack(uss, NULL); 3268 /* squash all but -EFAULT for now */ 3269 return err == -EFAULT ? err : 0; 3270 } 3271 3272 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) 3273 { 3274 int err; 3275 struct task_struct *t = current; 3276 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), 3277 &uss->ss_sp) | 3278 __put_user(t->sas_ss_flags, &uss->ss_flags) | 3279 __put_user(t->sas_ss_size, &uss->ss_size); 3280 if (err) 3281 return err; 3282 if (t->sas_ss_flags & SS_AUTODISARM) 3283 sas_ss_reset(t); 3284 return 0; 3285 } 3286 #endif 3287 3288 #ifdef __ARCH_WANT_SYS_SIGPENDING 3289 3290 /** 3291 * sys_sigpending - examine pending signals 3292 * @set: where the mask of pending signals is returned 3293 */ 3294 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) 3295 { 3296 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); 3297 } 3298 3299 #ifdef CONFIG_COMPAT 3300 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32) 3301 { 3302 sigset_t set; 3303 int err = do_sigpending(&set, sizeof(old_sigset_t)); 3304 if (err == 0) 3305 if (copy_to_user(set32, &set, sizeof(old_sigset_t))) 3306 err = -EFAULT; 3307 return err; 3308 } 3309 #endif 3310 3311 #endif 3312 3313 #ifdef __ARCH_WANT_SYS_SIGPROCMASK 3314 /** 3315 * sys_sigprocmask - examine and change blocked signals 3316 * @how: whether to add, remove, or set signals 3317 * @nset: signals to add or remove (if non-null) 3318 * @oset: previous value of signal mask if non-null 3319 * 3320 * Some platforms have their own version with special arguments; 3321 * others support only sys_rt_sigprocmask.
3322 */ 3323 3324 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, 3325 old_sigset_t __user *, oset) 3326 { 3327 old_sigset_t old_set, new_set; 3328 sigset_t new_blocked; 3329 3330 old_set = current->blocked.sig[0]; 3331 3332 if (nset) { 3333 if (copy_from_user(&new_set, nset, sizeof(*nset))) 3334 return -EFAULT; 3335 3336 new_blocked = current->blocked; 3337 3338 switch (how) { 3339 case SIG_BLOCK: 3340 sigaddsetmask(&new_blocked, new_set); 3341 break; 3342 case SIG_UNBLOCK: 3343 sigdelsetmask(&new_blocked, new_set); 3344 break; 3345 case SIG_SETMASK: 3346 new_blocked.sig[0] = new_set; 3347 break; 3348 default: 3349 return -EINVAL; 3350 } 3351 3352 set_current_blocked(&new_blocked); 3353 } 3354 3355 if (oset) { 3356 if (copy_to_user(oset, &old_set, sizeof(*oset))) 3357 return -EFAULT; 3358 } 3359 3360 return 0; 3361 } 3362 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ 3363 3364 #ifndef CONFIG_ODD_RT_SIGACTION 3365 /** 3366 * sys_rt_sigaction - alter an action taken by a process 3367 * @sig: signal to be sent 3368 * @act: new sigaction 3369 * @oact: used to save the previous sigaction 3370 * @sigsetsize: size of sigset_t type 3371 */ 3372 SYSCALL_DEFINE4(rt_sigaction, int, sig, 3373 const struct sigaction __user *, act, 3374 struct sigaction __user *, oact, 3375 size_t, sigsetsize) 3376 { 3377 struct k_sigaction new_sa, old_sa; 3378 int ret = -EINVAL; 3379 3380 /* XXX: Don't preclude handling different sized sigset_t's. */ 3381 if (sigsetsize != sizeof(sigset_t)) 3382 goto out; 3383 3384 if (act) { 3385 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) 3386 return -EFAULT; 3387 } 3388 3389 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); 3390 3391 if (!ret && oact) { 3392 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) 3393 return -EFAULT; 3394 } 3395 out: 3396 return ret; 3397 } 3398 #ifdef CONFIG_COMPAT 3399 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, 3400 const struct compat_sigaction __user *, act, 3401 struct compat_sigaction __user *, oact, 3402 compat_size_t, sigsetsize) 3403 { 3404 struct k_sigaction new_ka, old_ka; 3405 compat_sigset_t mask; 3406 #ifdef __ARCH_HAS_SA_RESTORER 3407 compat_uptr_t restorer; 3408 #endif 3409 int ret; 3410 3411 /* XXX: Don't preclude handling different sized sigset_t's. */ 3412 if (sigsetsize != sizeof(compat_sigset_t)) 3413 return -EINVAL; 3414 3415 if (act) { 3416 compat_uptr_t handler; 3417 ret = get_user(handler, &act->sa_handler); 3418 new_ka.sa.sa_handler = compat_ptr(handler); 3419 #ifdef __ARCH_HAS_SA_RESTORER 3420 ret |= get_user(restorer, &act->sa_restorer); 3421 new_ka.sa.sa_restorer = compat_ptr(restorer); 3422 #endif 3423 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask)); 3424 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); 3425 if (ret) 3426 return -EFAULT; 3427 sigset_from_compat(&new_ka.sa.sa_mask, &mask); 3428 } 3429 3430 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); 3431 if (!ret && oact) { 3432 sigset_to_compat(&mask, &old_ka.sa.sa_mask); 3433 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 3434 &oact->sa_handler); 3435 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask)); 3436 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); 3437 #ifdef __ARCH_HAS_SA_RESTORER 3438 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), 3439 &oact->sa_restorer); 3440 #endif 3441 } 3442 return ret; 3443 } 3444 #endif 3445 #endif /* !CONFIG_ODD_RT_SIGACTION */ 3446 3447 #ifdef CONFIG_OLD_SIGACTION 3448 SYSCALL_DEFINE3(sigaction, int, sig, 3449 const struct old_sigaction __user *, act, 3450 struct old_sigaction __user *, oact) 3451 { 3452 struct k_sigaction new_ka, old_ka; 3453 int ret; 3454 3455 if (act) { 3456 old_sigset_t mask; 3457 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 3458 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 3459 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || 3460 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 3461 __get_user(mask, &act->sa_mask)) 3462 return -EFAULT; 3463 #ifdef __ARCH_HAS_KA_RESTORER 3464 new_ka.ka_restorer = NULL; 3465 #endif 3466 siginitset(&new_ka.sa.sa_mask, mask); 3467 } 3468 3469 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 3470 3471 if (!ret && oact) { 3472 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 3473 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 3474 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || 3475 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 3476 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 3477 return -EFAULT; 3478 } 3479 3480 return ret; 3481 } 3482 #endif 3483 #ifdef CONFIG_COMPAT_OLD_SIGACTION 3484 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig, 3485 const struct compat_old_sigaction __user *, act, 3486 struct compat_old_sigaction __user *, oact) 3487 { 3488 struct k_sigaction new_ka, old_ka; 3489 int ret; 3490 compat_old_sigset_t mask; 3491 compat_uptr_t handler, restorer; 3492 3493 if (act) { 3494 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 3495 __get_user(handler, &act->sa_handler) || 3496 __get_user(restorer, &act->sa_restorer) || 3497 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 3498 __get_user(mask, &act->sa_mask)) 3499 return -EFAULT; 3500 3501 #ifdef __ARCH_HAS_KA_RESTORER 3502 new_ka.ka_restorer = NULL; 3503 #endif 3504 new_ka.sa.sa_handler = compat_ptr(handler); 3505 new_ka.sa.sa_restorer = compat_ptr(restorer); 3506 siginitset(&new_ka.sa.sa_mask, mask); 3507 } 3508 3509 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 3510 3511 if (!ret && oact) { 3512 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 3513 __put_user(ptr_to_compat(old_ka.sa.sa_handler), 3514 &oact->sa_handler) || 3515 __put_user(ptr_to_compat(old_ka.sa.sa_restorer), 3516 &oact->sa_restorer) || 3517 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 3518 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 3519 return -EFAULT; 3520 } 3521 return ret; 3522 } 3523 #endif 3524 3525 #ifdef CONFIG_SGETMASK_SYSCALL 3526 3527 /* 3528 * For backwards compatibility. Functionality superseded by sigprocmask. 
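 *
 * Rough userspace equivalence (illustrative only): ssetmask(m) acts
 * like sigprocmask(SIG_SETMASK, ...) restricted to the first word of
 * the mask, and sgetmask() reads that word back:
 *
 *	long old = ssetmask(newmask);	... returns previous sig[0]
 *	long cur = sgetmask();		... reads current->blocked.sig[0]
 *
 * SIGKILL and SIGSTOP still cannot be blocked this way, see
 * set_current_blocked() above.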
3529 */ 3530 SYSCALL_DEFINE0(sgetmask) 3531 { 3532 /* SMP safe */ 3533 return current->blocked.sig[0]; 3534 } 3535 3536 SYSCALL_DEFINE1(ssetmask, int, newmask) 3537 { 3538 int old = current->blocked.sig[0]; 3539 sigset_t newset; 3540 3541 siginitset(&newset, newmask); 3542 set_current_blocked(&newset); 3543 3544 return old; 3545 } 3546 #endif /* CONFIG_SGETMASK_SYSCALL */ 3547 3548 #ifdef __ARCH_WANT_SYS_SIGNAL 3549 /* 3550 * For backwards compatibility. Functionality superseded by sigaction. 3551 */ 3552 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) 3553 { 3554 struct k_sigaction new_sa, old_sa; 3555 int ret; 3556 3557 new_sa.sa.sa_handler = handler; 3558 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; 3559 sigemptyset(&new_sa.sa.sa_mask); 3560 3561 ret = do_sigaction(sig, &new_sa, &old_sa); 3562 3563 return ret ? ret : (unsigned long)old_sa.sa.sa_handler; 3564 } 3565 #endif /* __ARCH_WANT_SYS_SIGNAL */ 3566 3567 #ifdef __ARCH_WANT_SYS_PAUSE 3568 3569 SYSCALL_DEFINE0(pause) 3570 { 3571 while (!signal_pending(current)) { 3572 __set_current_state(TASK_INTERRUPTIBLE); 3573 schedule(); 3574 } 3575 return -ERESTARTNOHAND; 3576 } 3577 3578 #endif 3579 3580 static int sigsuspend(sigset_t *set) 3581 { 3582 current->saved_sigmask = current->blocked; 3583 set_current_blocked(set); 3584 3585 while (!signal_pending(current)) { 3586 __set_current_state(TASK_INTERRUPTIBLE); 3587 schedule(); 3588 } 3589 set_restore_sigmask(); 3590 return -ERESTARTNOHAND; 3591 } 3592 3593 /** 3594 * sys_rt_sigsuspend - replace the signal mask with the @unewset value 3595 * until a signal is received 3596 * @unewset: new signal mask value 3597 * @sigsetsize: size of sigset_t type 3598 */ 3599 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) 3600 { 3601 sigset_t newset; 3602 3603 /* XXX: Don't preclude handling different sized sigset_t's. */ 3604 if (sigsetsize != sizeof(sigset_t)) 3605 return -EINVAL; 3606 3607 if (copy_from_user(&newset, unewset, sizeof(newset))) 3608 return -EFAULT; 3609 return sigsuspend(&newset); 3610 } 3611 3612 #ifdef CONFIG_COMPAT 3613 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) 3614 { 3615 #ifdef __BIG_ENDIAN 3616 sigset_t newset; 3617 compat_sigset_t newset32; 3618 3619 /* XXX: Don't preclude handling different sized sigset_t's. */ 3620 if (sigsetsize != sizeof(sigset_t)) 3621 return -EINVAL; 3622 3623 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t))) 3624 return -EFAULT; 3625 sigset_from_compat(&newset, &newset32); 3626 return sigsuspend(&newset); 3627 #else 3628 /* on little-endian bitmaps don't care about granularity */ 3629 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize); 3630 #endif 3631 } 3632 #endif 3633 3634 #ifdef CONFIG_OLD_SIGSUSPEND 3635 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) 3636 { 3637 sigset_t blocked; 3638 siginitset(&blocked, mask); 3639 return sigsuspend(&blocked); 3640 } 3641 #endif 3642 #ifdef CONFIG_OLD_SIGSUSPEND3 3643 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) 3644 { 3645 sigset_t blocked; 3646 siginitset(&blocked, mask); 3647 return sigsuspend(&blocked); 3648 } 3649 #endif 3650 3651 __weak const char *arch_vma_name(struct vm_area_struct *vma) 3652 { 3653 return NULL; 3654 } 3655 3656 void __init signals_init(void) 3657 { 3658 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong!
*/ 3659 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE 3660 != offsetof(struct siginfo, _sifields._pad)); 3661 3662 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); 3663 } 3664 3665 #ifdef CONFIG_KGDB_KDB 3666 #include <linux/kdb.h> 3667 /* 3668 * kdb_send_sig_info - Allows kdb to send signals without exposing 3669 * signal internals. This function checks if the required locks are 3670 * available before calling the main signal code, to avoid kdb 3671 * deadlocks. 3672 */ 3673 void 3674 kdb_send_sig_info(struct task_struct *t, struct siginfo *info) 3675 { 3676 static struct task_struct *kdb_prev_t; 3677 int sig, new_t; 3678 if (!spin_trylock(&t->sighand->siglock)) { 3679 kdb_printf("Can't do kill command now.\n" 3680 "The sigmask lock is held somewhere else in " 3681 "the kernel, try again later\n"); 3682 return; 3683 } 3684 spin_unlock(&t->sighand->siglock); 3685 new_t = kdb_prev_t != t; 3686 kdb_prev_t = t; 3687 if (t->state != TASK_RUNNING && new_t) { 3688 kdb_printf("Process is not RUNNING, sending a signal from " 3689 "kdb risks deadlock\n" 3690 "on the run queue locks. " 3691 "The signal has _not_ been sent.\n" 3692 "Reissue the kill command if you want to risk " 3693 "the deadlock.\n"); 3694 return; 3695 } 3696 sig = info->si_signo; 3697 if (send_sig_info(sig, info, t)) 3698 kdb_printf("Failed to deliver signal %d to process %d.\n", 3699 sig, t->pid); 3700 else 3701 kdb_printf("Signal %d sent to process %d.\n", sig, t->pid); 3702 } 3703 #endif /* CONFIG_KGDB_KDB */ 3704
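/*
 * Usage note (userspace, illustrative only; "done" is a hypothetical
 * volatile sig_atomic_t flag set by a SIGCHLD handler): sigsuspend()
 * above is what makes the classic "block, test, wait" loop race-free:
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!done)
 *		sigsuspend(&old);	// atomically unblock and sleep
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * A plain pause() could miss a signal delivered between the test of
 * "done" and the call; sigsuspend() closes that window by restoring
 * the old mask and sleeping in a single step.
 */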