1 /* 2 * linux/kernel/signal.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 * 6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson 7 * 8 * 2003-06-02 Jim Houston - Concurrent Computer Corp. 9 * Changes to use preallocated sigqueue structures 10 * to allow signals to be sent reliably. 11 */ 12 13 #include <linux/slab.h> 14 #include <linux/export.h> 15 #include <linux/init.h> 16 #include <linux/sched/mm.h> 17 #include <linux/sched/user.h> 18 #include <linux/sched/debug.h> 19 #include <linux/sched/task.h> 20 #include <linux/sched/task_stack.h> 21 #include <linux/sched/cputime.h> 22 #include <linux/fs.h> 23 #include <linux/tty.h> 24 #include <linux/binfmts.h> 25 #include <linux/coredump.h> 26 #include <linux/security.h> 27 #include <linux/syscalls.h> 28 #include <linux/ptrace.h> 29 #include <linux/signal.h> 30 #include <linux/signalfd.h> 31 #include <linux/ratelimit.h> 32 #include <linux/tracehook.h> 33 #include <linux/capability.h> 34 #include <linux/freezer.h> 35 #include <linux/pid_namespace.h> 36 #include <linux/nsproxy.h> 37 #include <linux/user_namespace.h> 38 #include <linux/uprobes.h> 39 #include <linux/compat.h> 40 #include <linux/cn_proc.h> 41 #include <linux/compiler.h> 42 #include <linux/posix-timers.h> 43 #include <linux/livepatch.h> 44 45 #define CREATE_TRACE_POINTS 46 #include <trace/events/signal.h> 47 48 #include <asm/param.h> 49 #include <linux/uaccess.h> 50 #include <asm/unistd.h> 51 #include <asm/siginfo.h> 52 #include <asm/cacheflush.h> 53 #include "audit.h" /* audit_signal_info() */ 54 55 /* 56 * SLAB caches for signal bits. 57 */ 58 59 static struct kmem_cache *sigqueue_cachep; 60 61 int print_fatal_signals __read_mostly; 62 63 static void __user *sig_handler(struct task_struct *t, int sig) 64 { 65 return t->sighand->action[sig - 1].sa.sa_handler; 66 } 67 68 static int sig_handler_ignored(void __user *handler, int sig) 69 { 70 /* Is it explicitly or implicitly ignored? */ 71 return handler == SIG_IGN || 72 (handler == SIG_DFL && sig_kernel_ignore(sig)); 73 } 74 75 static int sig_task_ignored(struct task_struct *t, int sig, bool force) 76 { 77 void __user *handler; 78 79 handler = sig_handler(t, sig); 80 81 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && 82 handler == SIG_DFL && !(force && sig_kernel_only(sig))) 83 return 1; 84 85 return sig_handler_ignored(handler, sig); 86 } 87 88 static int sig_ignored(struct task_struct *t, int sig, bool force) 89 { 90 /* 91 * Blocked signals are never ignored, since the 92 * signal handler may change by the time it is 93 * unblocked. 94 */ 95 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 96 return 0; 97 98 /* 99 * Tracers may want to know about even ignored signal unless it 100 * is SIGKILL which can't be reported anyway but can be ignored 101 * by SIGNAL_UNKILLABLE task. 102 */ 103 if (t->ptrace && sig != SIGKILL) 104 return 0; 105 106 return sig_task_ignored(t, sig, force); 107 } 108 109 /* 110 * Re-calculate pending state from the set of locally pending 111 * signals, globally pending signals, and blocked signals. 
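 * In other words, TIF_SIGPENDING needs to be set whenever a jobctl
 * stop/trap is pending or ((pending | shared_pending) & ~blocked) is
 * non-empty; recalc_sigpending_tsk() below computes exactly that.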
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
        if ((t->jobctl & JOBCTL_PENDING_MASK) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; only callers that know they should
         * clear it do so.
         */
        return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (!recalc_sigpending_tsk(current) && !freezing(current) &&
            !klp_patch_pending(current))
                clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
         sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;

        /*
         * Handle the first word specially: it contains the
         * synchronous signals that need to be dequeued first.
         */
        x = *s &~ *m;
        if (x) {
                if (x & SYNCHRONOUS_MASK)
                        x &= SYNCHRONOUS_MASK;
                sig = ffz(~x) + 1;
                return sig;
        }

        switch (_NSIG_WORDS) {
        default:
                for (i = 1; i < _NSIG_WORDS; ++i) {
                        x = *++s &~ *++m;
                        if (!x)
                                continue;
                        sig = ffz(~x) + i*_NSIG_BPW + 1;
                        break;
                }
                break;

        case 2:
                x = s[1] &~ m[1];
                if (!x)
                        break;
                sig = ffz(~x) + _NSIG_BPW + 1;
                break;

        case 1:
                /* Nothing to do */
                break;
        }

        return sig;
}

static inline void print_dropped_signal(int sig)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!print_fatal_signals)
                return;

        if (!__ratelimit(&ratelimit_state))
                return;

        pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
                current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.
If stop signo is being set, the existing signo is 249 * cleared. If @task is already being killed or exiting, this function 250 * becomes noop. 251 * 252 * CONTEXT: 253 * Must be called with @task->sighand->siglock held. 254 * 255 * RETURNS: 256 * %true if @mask is set, %false if made noop because @task was dying. 257 */ 258 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask) 259 { 260 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME | 261 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); 262 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); 263 264 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) 265 return false; 266 267 if (mask & JOBCTL_STOP_SIGMASK) 268 task->jobctl &= ~JOBCTL_STOP_SIGMASK; 269 270 task->jobctl |= mask; 271 return true; 272 } 273 274 /** 275 * task_clear_jobctl_trapping - clear jobctl trapping bit 276 * @task: target task 277 * 278 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED. 279 * Clear it and wake up the ptracer. Note that we don't need any further 280 * locking. @task->siglock guarantees that @task->parent points to the 281 * ptracer. 282 * 283 * CONTEXT: 284 * Must be called with @task->sighand->siglock held. 285 */ 286 void task_clear_jobctl_trapping(struct task_struct *task) 287 { 288 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { 289 task->jobctl &= ~JOBCTL_TRAPPING; 290 smp_mb(); /* advised by wake_up_bit() */ 291 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT); 292 } 293 } 294 295 /** 296 * task_clear_jobctl_pending - clear jobctl pending bits 297 * @task: target task 298 * @mask: pending bits to clear 299 * 300 * Clear @mask from @task->jobctl. @mask must be subset of 301 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other 302 * STOP bits are cleared together. 303 * 304 * If clearing of @mask leaves no stop or trap pending, this function calls 305 * task_clear_jobctl_trapping(). 306 * 307 * CONTEXT: 308 * Must be called with @task->sighand->siglock held. 309 */ 310 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask) 311 { 312 BUG_ON(mask & ~JOBCTL_PENDING_MASK); 313 314 if (mask & JOBCTL_STOP_PENDING) 315 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; 316 317 task->jobctl &= ~mask; 318 319 if (!(task->jobctl & JOBCTL_PENDING_MASK)) 320 task_clear_jobctl_trapping(task); 321 } 322 323 /** 324 * task_participate_group_stop - participate in a group stop 325 * @task: task participating in a group stop 326 * 327 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. 328 * Group stop states are cleared and the group stop count is consumed if 329 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group 330 * stop, the appropriate %SIGNAL_* flags are set. 331 * 332 * CONTEXT: 333 * Must be called with @task->sighand->siglock held. 334 * 335 * RETURNS: 336 * %true if group stop completion should be notified to the parent, %false 337 * otherwise. 338 */ 339 static bool task_participate_group_stop(struct task_struct *task) 340 { 341 struct signal_struct *sig = task->signal; 342 bool consume = task->jobctl & JOBCTL_STOP_CONSUME; 343 344 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); 345 346 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); 347 348 if (!consume) 349 return false; 350 351 if (!WARN_ON_ONCE(sig->group_stop_count == 0)) 352 sig->group_stop_count--; 353 354 /* 355 * Tell the caller to notify completion iff we are entering into a 356 * fresh group stop. 
Read comment in do_signal_stop() for details. 357 */ 358 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { 359 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); 360 return true; 361 } 362 return false; 363 } 364 365 /* 366 * allocate a new signal queue record 367 * - this may be called without locks if and only if t == current, otherwise an 368 * appropriate lock must be held to stop the target task from exiting 369 */ 370 static struct sigqueue * 371 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) 372 { 373 struct sigqueue *q = NULL; 374 struct user_struct *user; 375 376 /* 377 * Protect access to @t credentials. This can go away when all 378 * callers hold rcu read lock. 379 */ 380 rcu_read_lock(); 381 user = get_uid(__task_cred(t)->user); 382 atomic_inc(&user->sigpending); 383 rcu_read_unlock(); 384 385 if (override_rlimit || 386 atomic_read(&user->sigpending) <= 387 task_rlimit(t, RLIMIT_SIGPENDING)) { 388 q = kmem_cache_alloc(sigqueue_cachep, flags); 389 } else { 390 print_dropped_signal(sig); 391 } 392 393 if (unlikely(q == NULL)) { 394 atomic_dec(&user->sigpending); 395 free_uid(user); 396 } else { 397 INIT_LIST_HEAD(&q->list); 398 q->flags = 0; 399 q->user = user; 400 } 401 402 return q; 403 } 404 405 static void __sigqueue_free(struct sigqueue *q) 406 { 407 if (q->flags & SIGQUEUE_PREALLOC) 408 return; 409 atomic_dec(&q->user->sigpending); 410 free_uid(q->user); 411 kmem_cache_free(sigqueue_cachep, q); 412 } 413 414 void flush_sigqueue(struct sigpending *queue) 415 { 416 struct sigqueue *q; 417 418 sigemptyset(&queue->signal); 419 while (!list_empty(&queue->list)) { 420 q = list_entry(queue->list.next, struct sigqueue , list); 421 list_del_init(&q->list); 422 __sigqueue_free(q); 423 } 424 } 425 426 /* 427 * Flush all pending signals for this kthread. 428 */ 429 void flush_signals(struct task_struct *t) 430 { 431 unsigned long flags; 432 433 spin_lock_irqsave(&t->sighand->siglock, flags); 434 clear_tsk_thread_flag(t, TIF_SIGPENDING); 435 flush_sigqueue(&t->pending); 436 flush_sigqueue(&t->signal->shared_pending); 437 spin_unlock_irqrestore(&t->sighand->siglock, flags); 438 } 439 440 #ifdef CONFIG_POSIX_TIMERS 441 static void __flush_itimer_signals(struct sigpending *pending) 442 { 443 sigset_t signal, retain; 444 struct sigqueue *q, *n; 445 446 signal = pending->signal; 447 sigemptyset(&retain); 448 449 list_for_each_entry_safe(q, n, &pending->list, list) { 450 int sig = q->info.si_signo; 451 452 if (likely(q->info.si_code != SI_TIMER)) { 453 sigaddset(&retain, sig); 454 } else { 455 sigdelset(&signal, sig); 456 list_del_init(&q->list); 457 __sigqueue_free(q); 458 } 459 } 460 461 sigorsets(&pending->signal, &signal, &retain); 462 } 463 464 void flush_itimer_signals(void) 465 { 466 struct task_struct *tsk = current; 467 unsigned long flags; 468 469 spin_lock_irqsave(&tsk->sighand->siglock, flags); 470 __flush_itimer_signals(&tsk->pending); 471 __flush_itimer_signals(&tsk->signal->shared_pending); 472 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 473 } 474 #endif 475 476 void ignore_signals(struct task_struct *t) 477 { 478 int i; 479 480 for (i = 0; i < _NSIG; ++i) 481 t->sighand->action[i].sa.sa_handler = SIG_IGN; 482 483 flush_signals(t); 484 } 485 486 /* 487 * Flush all handlers for a task. 
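 * Handlers set to SIG_IGN are preserved unless force_default is set;
 * the sa_flags and sa_mask are reset either way.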
488 */ 489 490 void 491 flush_signal_handlers(struct task_struct *t, int force_default) 492 { 493 int i; 494 struct k_sigaction *ka = &t->sighand->action[0]; 495 for (i = _NSIG ; i != 0 ; i--) { 496 if (force_default || ka->sa.sa_handler != SIG_IGN) 497 ka->sa.sa_handler = SIG_DFL; 498 ka->sa.sa_flags = 0; 499 #ifdef __ARCH_HAS_SA_RESTORER 500 ka->sa.sa_restorer = NULL; 501 #endif 502 sigemptyset(&ka->sa.sa_mask); 503 ka++; 504 } 505 } 506 507 int unhandled_signal(struct task_struct *tsk, int sig) 508 { 509 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; 510 if (is_global_init(tsk)) 511 return 1; 512 if (handler != SIG_IGN && handler != SIG_DFL) 513 return 0; 514 /* if ptraced, let the tracer determine */ 515 return !tsk->ptrace; 516 } 517 518 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, 519 bool *resched_timer) 520 { 521 struct sigqueue *q, *first = NULL; 522 523 /* 524 * Collect the siginfo appropriate to this signal. Check if 525 * there is another siginfo for the same signal. 526 */ 527 list_for_each_entry(q, &list->list, list) { 528 if (q->info.si_signo == sig) { 529 if (first) 530 goto still_pending; 531 first = q; 532 } 533 } 534 535 sigdelset(&list->signal, sig); 536 537 if (first) { 538 still_pending: 539 list_del_init(&first->list); 540 copy_siginfo(info, &first->info); 541 542 *resched_timer = 543 (first->flags & SIGQUEUE_PREALLOC) && 544 (info->si_code == SI_TIMER) && 545 (info->si_sys_private); 546 547 __sigqueue_free(first); 548 } else { 549 /* 550 * Ok, it wasn't in the queue. This must be 551 * a fast-pathed signal or we must have been 552 * out of queue space. So zero out the info. 553 */ 554 clear_siginfo(info); 555 info->si_signo = sig; 556 info->si_errno = 0; 557 info->si_code = SI_USER; 558 info->si_pid = 0; 559 info->si_uid = 0; 560 } 561 } 562 563 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 564 siginfo_t *info, bool *resched_timer) 565 { 566 int sig = next_signal(pending, mask); 567 568 if (sig) 569 collect_signal(sig, pending, info, resched_timer); 570 return sig; 571 } 572 573 /* 574 * Dequeue a signal and return the element to the caller, which is 575 * expected to free it. 576 * 577 * All callers have to hold the siglock. 578 */ 579 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 580 { 581 bool resched_timer = false; 582 int signr; 583 584 /* We only dequeue private signals from ourselves, we don't let 585 * signalfd steal them 586 */ 587 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); 588 if (!signr) { 589 signr = __dequeue_signal(&tsk->signal->shared_pending, 590 mask, info, &resched_timer); 591 #ifdef CONFIG_POSIX_TIMERS 592 /* 593 * itimer signal ? 594 * 595 * itimers are process shared and we restart periodic 596 * itimers in the signal delivery path to prevent DoS 597 * attacks in the high resolution timer case. This is 598 * compliant with the old way of self-restarting 599 * itimers, as the SIGALRM is a legacy signal and only 600 * queued once. Changing the restart behaviour to 601 * restart the timer in the signal dequeue path is 602 * reducing the timer noise on heavy loaded !highres 603 * systems too. 
604 */ 605 if (unlikely(signr == SIGALRM)) { 606 struct hrtimer *tmr = &tsk->signal->real_timer; 607 608 if (!hrtimer_is_queued(tmr) && 609 tsk->signal->it_real_incr != 0) { 610 hrtimer_forward(tmr, tmr->base->get_time(), 611 tsk->signal->it_real_incr); 612 hrtimer_restart(tmr); 613 } 614 } 615 #endif 616 } 617 618 recalc_sigpending(); 619 if (!signr) 620 return 0; 621 622 if (unlikely(sig_kernel_stop(signr))) { 623 /* 624 * Set a marker that we have dequeued a stop signal. Our 625 * caller might release the siglock and then the pending 626 * stop signal it is about to process is no longer in the 627 * pending bitmasks, but must still be cleared by a SIGCONT 628 * (and overruled by a SIGKILL). So those cases clear this 629 * shared flag after we've set it. Note that this flag may 630 * remain set after the signal we return is ignored or 631 * handled. That doesn't matter because its only purpose 632 * is to alert stop-signal processing code when another 633 * processor has come along and cleared the flag. 634 */ 635 current->jobctl |= JOBCTL_STOP_DEQUEUED; 636 } 637 #ifdef CONFIG_POSIX_TIMERS 638 if (resched_timer) { 639 /* 640 * Release the siglock to ensure proper locking order 641 * of timer locks outside of siglocks. Note, we leave 642 * irqs disabled here, since the posix-timers code is 643 * about to disable them again anyway. 644 */ 645 spin_unlock(&tsk->sighand->siglock); 646 posixtimer_rearm(info); 647 spin_lock(&tsk->sighand->siglock); 648 649 /* Don't expose the si_sys_private value to userspace */ 650 info->si_sys_private = 0; 651 } 652 #endif 653 return signr; 654 } 655 656 /* 657 * Tell a process that it has a new active signal.. 658 * 659 * NOTE! we rely on the previous spin_lock to 660 * lock interrupts for us! We can only be called with 661 * "siglock" held, and the local interrupt must 662 * have been disabled when that got acquired! 663 * 664 * No need to set need_resched since signal event passing 665 * goes through ->blocked 666 */ 667 void signal_wake_up_state(struct task_struct *t, unsigned int state) 668 { 669 set_tsk_thread_flag(t, TIF_SIGPENDING); 670 /* 671 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable 672 * case. We don't check t->state here because there is a race with it 673 * executing another processor and just now entering stopped state. 674 * By using wake_up_state, we ensure the process will wake up and 675 * handle its death signal. 676 */ 677 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) 678 kick_process(t); 679 } 680 681 /* 682 * Remove signals in mask from the pending set and queue. 683 * Returns 1 if any signals were found. 684 * 685 * All callers must be holding the siglock. 
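 * Unlike flush_sigqueue(), only the signals in the given mask are
 * removed; any other queued signals are left pending.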
686 */ 687 static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s) 688 { 689 struct sigqueue *q, *n; 690 sigset_t m; 691 692 sigandsets(&m, mask, &s->signal); 693 if (sigisemptyset(&m)) 694 return 0; 695 696 sigandnsets(&s->signal, &s->signal, mask); 697 list_for_each_entry_safe(q, n, &s->list, list) { 698 if (sigismember(mask, q->info.si_signo)) { 699 list_del_init(&q->list); 700 __sigqueue_free(q); 701 } 702 } 703 return 1; 704 } 705 706 static inline int is_si_special(const struct siginfo *info) 707 { 708 return info <= SEND_SIG_FORCED; 709 } 710 711 static inline bool si_fromuser(const struct siginfo *info) 712 { 713 return info == SEND_SIG_NOINFO || 714 (!is_si_special(info) && SI_FROMUSER(info)); 715 } 716 717 /* 718 * called with RCU read lock from check_kill_permission() 719 */ 720 static int kill_ok_by_cred(struct task_struct *t) 721 { 722 const struct cred *cred = current_cred(); 723 const struct cred *tcred = __task_cred(t); 724 725 if (uid_eq(cred->euid, tcred->suid) || 726 uid_eq(cred->euid, tcred->uid) || 727 uid_eq(cred->uid, tcred->suid) || 728 uid_eq(cred->uid, tcred->uid)) 729 return 1; 730 731 if (ns_capable(tcred->user_ns, CAP_KILL)) 732 return 1; 733 734 return 0; 735 } 736 737 /* 738 * Bad permissions for sending the signal 739 * - the caller must hold the RCU read lock 740 */ 741 static int check_kill_permission(int sig, struct siginfo *info, 742 struct task_struct *t) 743 { 744 struct pid *sid; 745 int error; 746 747 if (!valid_signal(sig)) 748 return -EINVAL; 749 750 if (!si_fromuser(info)) 751 return 0; 752 753 error = audit_signal_info(sig, t); /* Let audit system see the signal */ 754 if (error) 755 return error; 756 757 if (!same_thread_group(current, t) && 758 !kill_ok_by_cred(t)) { 759 switch (sig) { 760 case SIGCONT: 761 sid = task_session(t); 762 /* 763 * We don't return the error if sid == NULL. The 764 * task was unhashed, the caller must notice this. 765 */ 766 if (!sid || sid == task_session(current)) 767 break; 768 default: 769 return -EPERM; 770 } 771 } 772 773 return security_task_kill(t, info, sig, NULL); 774 } 775 776 /** 777 * ptrace_trap_notify - schedule trap to notify ptracer 778 * @t: tracee wanting to notify tracer 779 * 780 * This function schedules sticky ptrace trap which is cleared on the next 781 * TRAP_STOP to notify ptracer of an event. @t must have been seized by 782 * ptracer. 783 * 784 * If @t is running, STOP trap will be taken. If trapped for STOP and 785 * ptracer is listening for events, tracee is woken up so that it can 786 * re-trap for the new event. If trapped otherwise, STOP trap will be 787 * eventually taken without returning to userland after the existing traps 788 * are finished by PTRACE_CONT. 789 * 790 * CONTEXT: 791 * Must be called with @task->sighand->siglock held. 792 */ 793 static void ptrace_trap_notify(struct task_struct *t) 794 { 795 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); 796 assert_spin_locked(&t->sighand->siglock); 797 798 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); 799 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); 800 } 801 802 /* 803 * Handle magic process-wide effects of stop/continue signals. Unlike 804 * the signal actions, these happen immediately at signal-generation 805 * time regardless of blocking, ignoring, or handling. This does the 806 * actual continuing for SIGCONT, but not the actual stopping for stop 807 * signals. The process stop is done as a signal action for SIG_DFL. 
808 * 809 * Returns true if the signal should be actually delivered, otherwise 810 * it should be dropped. 811 */ 812 static bool prepare_signal(int sig, struct task_struct *p, bool force) 813 { 814 struct signal_struct *signal = p->signal; 815 struct task_struct *t; 816 sigset_t flush; 817 818 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) { 819 if (!(signal->flags & SIGNAL_GROUP_EXIT)) 820 return sig == SIGKILL; 821 /* 822 * The process is in the middle of dying, nothing to do. 823 */ 824 } else if (sig_kernel_stop(sig)) { 825 /* 826 * This is a stop signal. Remove SIGCONT from all queues. 827 */ 828 siginitset(&flush, sigmask(SIGCONT)); 829 flush_sigqueue_mask(&flush, &signal->shared_pending); 830 for_each_thread(p, t) 831 flush_sigqueue_mask(&flush, &t->pending); 832 } else if (sig == SIGCONT) { 833 unsigned int why; 834 /* 835 * Remove all stop signals from all queues, wake all threads. 836 */ 837 siginitset(&flush, SIG_KERNEL_STOP_MASK); 838 flush_sigqueue_mask(&flush, &signal->shared_pending); 839 for_each_thread(p, t) { 840 flush_sigqueue_mask(&flush, &t->pending); 841 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); 842 if (likely(!(t->ptrace & PT_SEIZED))) 843 wake_up_state(t, __TASK_STOPPED); 844 else 845 ptrace_trap_notify(t); 846 } 847 848 /* 849 * Notify the parent with CLD_CONTINUED if we were stopped. 850 * 851 * If we were in the middle of a group stop, we pretend it 852 * was already finished, and then continued. Since SIGCHLD 853 * doesn't queue we report only CLD_STOPPED, as if the next 854 * CLD_CONTINUED was dropped. 855 */ 856 why = 0; 857 if (signal->flags & SIGNAL_STOP_STOPPED) 858 why |= SIGNAL_CLD_CONTINUED; 859 else if (signal->group_stop_count) 860 why |= SIGNAL_CLD_STOPPED; 861 862 if (why) { 863 /* 864 * The first thread which returns from do_signal_stop() 865 * will take ->siglock, notice SIGNAL_CLD_MASK, and 866 * notify its parent. See get_signal_to_deliver(). 867 */ 868 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); 869 signal->group_stop_count = 0; 870 signal->group_exit_code = 0; 871 } 872 } 873 874 return !sig_ignored(p, sig, force); 875 } 876 877 /* 878 * Test if P wants to take SIG. After we've checked all threads with this, 879 * it's equivalent to finding no threads not blocking SIG. Any threads not 880 * blocking SIG were ruled out because they are not running and already 881 * have pending signals. Such threads will dequeue from the shared queue 882 * as soon as they're available, so putting the signal on the shared queue 883 * will be equivalent to sending it to one such thread. 884 */ 885 static inline int wants_signal(int sig, struct task_struct *p) 886 { 887 if (sigismember(&p->blocked, sig)) 888 return 0; 889 if (p->flags & PF_EXITING) 890 return 0; 891 if (sig == SIGKILL) 892 return 1; 893 if (task_is_stopped_or_traced(p)) 894 return 0; 895 return task_curr(p) || !signal_pending(p); 896 } 897 898 static void complete_signal(int sig, struct task_struct *p, int group) 899 { 900 struct signal_struct *signal = p->signal; 901 struct task_struct *t; 902 903 /* 904 * Now find a thread we can wake up to take the signal off the queue. 905 * 906 * If the main thread wants the signal, it gets first crack. 907 * Probably the least surprising to the average bear. 908 */ 909 if (wants_signal(sig, p)) 910 t = p; 911 else if (!group || thread_group_empty(p)) 912 /* 913 * There is just one thread and it does not need to be woken. 914 * It will dequeue unblocked signals before it runs again. 
915 */ 916 return; 917 else { 918 /* 919 * Otherwise try to find a suitable thread. 920 */ 921 t = signal->curr_target; 922 while (!wants_signal(sig, t)) { 923 t = next_thread(t); 924 if (t == signal->curr_target) 925 /* 926 * No thread needs to be woken. 927 * Any eligible threads will see 928 * the signal in the queue soon. 929 */ 930 return; 931 } 932 signal->curr_target = t; 933 } 934 935 /* 936 * Found a killable thread. If the signal will be fatal, 937 * then start taking the whole group down immediately. 938 */ 939 if (sig_fatal(p, sig) && 940 !(signal->flags & SIGNAL_GROUP_EXIT) && 941 !sigismember(&t->real_blocked, sig) && 942 (sig == SIGKILL || !p->ptrace)) { 943 /* 944 * This signal will be fatal to the whole group. 945 */ 946 if (!sig_kernel_coredump(sig)) { 947 /* 948 * Start a group exit and wake everybody up. 949 * This way we don't have other threads 950 * running and doing things after a slower 951 * thread has the fatal signal pending. 952 */ 953 signal->flags = SIGNAL_GROUP_EXIT; 954 signal->group_exit_code = sig; 955 signal->group_stop_count = 0; 956 t = p; 957 do { 958 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); 959 sigaddset(&t->pending.signal, SIGKILL); 960 signal_wake_up(t, 1); 961 } while_each_thread(p, t); 962 return; 963 } 964 } 965 966 /* 967 * The signal is already in the shared-pending queue. 968 * Tell the chosen thread to wake up and dequeue it. 969 */ 970 signal_wake_up(t, sig == SIGKILL); 971 return; 972 } 973 974 static inline int legacy_queue(struct sigpending *signals, int sig) 975 { 976 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); 977 } 978 979 #ifdef CONFIG_USER_NS 980 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) 981 { 982 if (current_user_ns() == task_cred_xxx(t, user_ns)) 983 return; 984 985 if (SI_FROMKERNEL(info)) 986 return; 987 988 rcu_read_lock(); 989 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns), 990 make_kuid(current_user_ns(), info->si_uid)); 991 rcu_read_unlock(); 992 } 993 #else 994 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) 995 { 996 return; 997 } 998 #endif 999 1000 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, 1001 int group, int from_ancestor_ns) 1002 { 1003 struct sigpending *pending; 1004 struct sigqueue *q; 1005 int override_rlimit; 1006 int ret = 0, result; 1007 1008 assert_spin_locked(&t->sighand->siglock); 1009 1010 result = TRACE_SIGNAL_IGNORED; 1011 if (!prepare_signal(sig, t, 1012 from_ancestor_ns || (info == SEND_SIG_FORCED))) 1013 goto ret; 1014 1015 pending = group ? &t->signal->shared_pending : &t->pending; 1016 /* 1017 * Short-circuit ignored signals and support queuing 1018 * exactly one non-rt signal, so that we can get more 1019 * detailed information about the cause of the signal. 1020 */ 1021 result = TRACE_SIGNAL_ALREADY_PENDING; 1022 if (legacy_queue(pending, sig)) 1023 goto ret; 1024 1025 result = TRACE_SIGNAL_DELIVERED; 1026 /* 1027 * fast-pathed signals for kernel-internal things like SIGSTOP 1028 * or SIGKILL. 1029 */ 1030 if (info == SEND_SIG_FORCED) 1031 goto out_set; 1032 1033 /* 1034 * Real-time signals must be queued if sent by sigqueue, or 1035 * some other real-time mechanism. It is implementation 1036 * defined whether kill() does so. 
We attempt to do so, on 1037 * the principle of least surprise, but since kill is not 1038 * allowed to fail with EAGAIN when low on memory we just 1039 * make sure at least one signal gets delivered and don't 1040 * pass on the info struct. 1041 */ 1042 if (sig < SIGRTMIN) 1043 override_rlimit = (is_si_special(info) || info->si_code >= 0); 1044 else 1045 override_rlimit = 0; 1046 1047 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit); 1048 if (q) { 1049 list_add_tail(&q->list, &pending->list); 1050 switch ((unsigned long) info) { 1051 case (unsigned long) SEND_SIG_NOINFO: 1052 clear_siginfo(&q->info); 1053 q->info.si_signo = sig; 1054 q->info.si_errno = 0; 1055 q->info.si_code = SI_USER; 1056 q->info.si_pid = task_tgid_nr_ns(current, 1057 task_active_pid_ns(t)); 1058 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 1059 break; 1060 case (unsigned long) SEND_SIG_PRIV: 1061 clear_siginfo(&q->info); 1062 q->info.si_signo = sig; 1063 q->info.si_errno = 0; 1064 q->info.si_code = SI_KERNEL; 1065 q->info.si_pid = 0; 1066 q->info.si_uid = 0; 1067 break; 1068 default: 1069 copy_siginfo(&q->info, info); 1070 if (from_ancestor_ns) 1071 q->info.si_pid = 0; 1072 break; 1073 } 1074 1075 userns_fixup_signal_uid(&q->info, t); 1076 1077 } else if (!is_si_special(info)) { 1078 if (sig >= SIGRTMIN && info->si_code != SI_USER) { 1079 /* 1080 * Queue overflow, abort. We may abort if the 1081 * signal was rt and sent by user using something 1082 * other than kill(). 1083 */ 1084 result = TRACE_SIGNAL_OVERFLOW_FAIL; 1085 ret = -EAGAIN; 1086 goto ret; 1087 } else { 1088 /* 1089 * This is a silent loss of information. We still 1090 * send the signal, but the *info bits are lost. 1091 */ 1092 result = TRACE_SIGNAL_LOSE_INFO; 1093 } 1094 } 1095 1096 out_set: 1097 signalfd_notify(t, sig); 1098 sigaddset(&pending->signal, sig); 1099 complete_signal(sig, t, group); 1100 ret: 1101 trace_signal_generate(sig, info, t, group, result); 1102 return ret; 1103 } 1104 1105 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, 1106 int group) 1107 { 1108 int from_ancestor_ns = 0; 1109 1110 #ifdef CONFIG_PID_NS 1111 from_ancestor_ns = si_fromuser(info) && 1112 !task_pid_nr_ns(current, task_active_pid_ns(t)); 1113 #endif 1114 1115 return __send_signal(sig, info, t, group, from_ancestor_ns); 1116 } 1117 1118 static void print_fatal_signal(int signr) 1119 { 1120 struct pt_regs *regs = signal_pt_regs(); 1121 pr_info("potentially unexpected fatal signal %d.\n", signr); 1122 1123 #if defined(__i386__) && !defined(__arch_um__) 1124 pr_info("code at %08lx: ", regs->ip); 1125 { 1126 int i; 1127 for (i = 0; i < 16; i++) { 1128 unsigned char insn; 1129 1130 if (get_user(insn, (unsigned char *)(regs->ip + i))) 1131 break; 1132 pr_cont("%02x ", insn); 1133 } 1134 } 1135 pr_cont("\n"); 1136 #endif 1137 preempt_disable(); 1138 show_regs(regs); 1139 preempt_enable(); 1140 } 1141 1142 static int __init setup_print_fatal_signals(char *str) 1143 { 1144 get_option (&str, &print_fatal_signals); 1145 1146 return 1; 1147 } 1148 1149 __setup("print-fatal-signals=", setup_print_fatal_signals); 1150 1151 int 1152 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1153 { 1154 return send_signal(sig, info, p, 1); 1155 } 1156 1157 static int 1158 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) 1159 { 1160 return send_signal(sig, info, t, 0); 1161 } 1162 1163 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, 1164 bool group) 1165 
{ 1166 unsigned long flags; 1167 int ret = -ESRCH; 1168 1169 if (lock_task_sighand(p, &flags)) { 1170 ret = send_signal(sig, info, p, group); 1171 unlock_task_sighand(p, &flags); 1172 } 1173 1174 return ret; 1175 } 1176 1177 /* 1178 * Force a signal that the process can't ignore: if necessary 1179 * we unblock the signal and change any SIG_IGN to SIG_DFL. 1180 * 1181 * Note: If we unblock the signal, we always reset it to SIG_DFL, 1182 * since we do not want to have a signal handler that was blocked 1183 * be invoked when user space had explicitly blocked it. 1184 * 1185 * We don't want to have recursive SIGSEGV's etc, for example, 1186 * that is why we also clear SIGNAL_UNKILLABLE. 1187 */ 1188 int 1189 force_sig_info(int sig, struct siginfo *info, struct task_struct *t) 1190 { 1191 unsigned long int flags; 1192 int ret, blocked, ignored; 1193 struct k_sigaction *action; 1194 1195 spin_lock_irqsave(&t->sighand->siglock, flags); 1196 action = &t->sighand->action[sig-1]; 1197 ignored = action->sa.sa_handler == SIG_IGN; 1198 blocked = sigismember(&t->blocked, sig); 1199 if (blocked || ignored) { 1200 action->sa.sa_handler = SIG_DFL; 1201 if (blocked) { 1202 sigdelset(&t->blocked, sig); 1203 recalc_sigpending_and_wake(t); 1204 } 1205 } 1206 /* 1207 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect 1208 * debugging to leave init killable. 1209 */ 1210 if (action->sa.sa_handler == SIG_DFL && !t->ptrace) 1211 t->signal->flags &= ~SIGNAL_UNKILLABLE; 1212 ret = specific_send_sig_info(sig, info, t); 1213 spin_unlock_irqrestore(&t->sighand->siglock, flags); 1214 1215 return ret; 1216 } 1217 1218 /* 1219 * Nuke all other threads in the group. 1220 */ 1221 int zap_other_threads(struct task_struct *p) 1222 { 1223 struct task_struct *t = p; 1224 int count = 0; 1225 1226 p->signal->group_stop_count = 0; 1227 1228 while_each_thread(p, t) { 1229 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); 1230 count++; 1231 1232 /* Don't bother with already dead threads */ 1233 if (t->exit_state) 1234 continue; 1235 sigaddset(&t->pending.signal, SIGKILL); 1236 signal_wake_up(t, 1); 1237 } 1238 1239 return count; 1240 } 1241 1242 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 1243 unsigned long *flags) 1244 { 1245 struct sighand_struct *sighand; 1246 1247 for (;;) { 1248 /* 1249 * Disable interrupts early to avoid deadlocks. 1250 * See rcu_read_unlock() comment header for details. 1251 */ 1252 local_irq_save(*flags); 1253 rcu_read_lock(); 1254 sighand = rcu_dereference(tsk->sighand); 1255 if (unlikely(sighand == NULL)) { 1256 rcu_read_unlock(); 1257 local_irq_restore(*flags); 1258 break; 1259 } 1260 /* 1261 * This sighand can be already freed and even reused, but 1262 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which 1263 * initializes ->siglock: this slab can't go away, it has 1264 * the same object type, ->siglock can't be reinitialized. 1265 * 1266 * We need to ensure that tsk->sighand is still the same 1267 * after we take the lock, we can race with de_thread() or 1268 * __exit_signal(). In the latter case the next iteration 1269 * must see ->sighand == NULL. 
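 * In the de_thread() case the loop simply retries and picks up the
 * new ->sighand.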
1270 */ 1271 spin_lock(&sighand->siglock); 1272 if (likely(sighand == tsk->sighand)) { 1273 rcu_read_unlock(); 1274 break; 1275 } 1276 spin_unlock(&sighand->siglock); 1277 rcu_read_unlock(); 1278 local_irq_restore(*flags); 1279 } 1280 1281 return sighand; 1282 } 1283 1284 /* 1285 * send signal info to all the members of a group 1286 */ 1287 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1288 { 1289 int ret; 1290 1291 rcu_read_lock(); 1292 ret = check_kill_permission(sig, info, p); 1293 rcu_read_unlock(); 1294 1295 if (!ret && sig) 1296 ret = do_send_sig_info(sig, info, p, true); 1297 1298 return ret; 1299 } 1300 1301 /* 1302 * __kill_pgrp_info() sends a signal to a process group: this is what the tty 1303 * control characters do (^C, ^Z etc) 1304 * - the caller must hold at least a readlock on tasklist_lock 1305 */ 1306 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) 1307 { 1308 struct task_struct *p = NULL; 1309 int retval, success; 1310 1311 success = 0; 1312 retval = -ESRCH; 1313 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 1314 int err = group_send_sig_info(sig, info, p); 1315 success |= !err; 1316 retval = err; 1317 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 1318 return success ? 0 : retval; 1319 } 1320 1321 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) 1322 { 1323 int error = -ESRCH; 1324 struct task_struct *p; 1325 1326 for (;;) { 1327 rcu_read_lock(); 1328 p = pid_task(pid, PIDTYPE_PID); 1329 if (p) 1330 error = group_send_sig_info(sig, info, p); 1331 rcu_read_unlock(); 1332 if (likely(!p || error != -ESRCH)) 1333 return error; 1334 1335 /* 1336 * The task was unhashed in between, try again. If it 1337 * is dead, pid_task() will return NULL, if we race with 1338 * de_thread() it will find the new leader. 1339 */ 1340 } 1341 } 1342 1343 static int kill_proc_info(int sig, struct siginfo *info, pid_t pid) 1344 { 1345 int error; 1346 rcu_read_lock(); 1347 error = kill_pid_info(sig, info, find_vpid(pid)); 1348 rcu_read_unlock(); 1349 return error; 1350 } 1351 1352 static int kill_as_cred_perm(const struct cred *cred, 1353 struct task_struct *target) 1354 { 1355 const struct cred *pcred = __task_cred(target); 1356 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) && 1357 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid)) 1358 return 0; 1359 return 1; 1360 } 1361 1362 /* like kill_pid_info(), but doesn't use uid/euid of "current" */ 1363 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid, 1364 const struct cred *cred) 1365 { 1366 int ret = -EINVAL; 1367 struct task_struct *p; 1368 unsigned long flags; 1369 1370 if (!valid_signal(sig)) 1371 return ret; 1372 1373 rcu_read_lock(); 1374 p = pid_task(pid, PIDTYPE_PID); 1375 if (!p) { 1376 ret = -ESRCH; 1377 goto out_unlock; 1378 } 1379 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) { 1380 ret = -EPERM; 1381 goto out_unlock; 1382 } 1383 ret = security_task_kill(p, info, sig, cred); 1384 if (ret) 1385 goto out_unlock; 1386 1387 if (sig) { 1388 if (lock_task_sighand(p, &flags)) { 1389 ret = __send_signal(sig, info, p, 1, 0); 1390 unlock_task_sighand(p, &flags); 1391 } else 1392 ret = -ESRCH; 1393 } 1394 out_unlock: 1395 rcu_read_unlock(); 1396 return ret; 1397 } 1398 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred); 1399 1400 /* 1401 * kill_something_info() interprets pid in interesting ways just like kill(2). 1402 * 1403 * POSIX specifies that kill(-1,sig) is unspecified, but what we have 1404 * is probably wrong. 
Should make it like BSD or SYSV. 1405 */ 1406 1407 static int kill_something_info(int sig, struct siginfo *info, pid_t pid) 1408 { 1409 int ret; 1410 1411 if (pid > 0) { 1412 rcu_read_lock(); 1413 ret = kill_pid_info(sig, info, find_vpid(pid)); 1414 rcu_read_unlock(); 1415 return ret; 1416 } 1417 1418 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */ 1419 if (pid == INT_MIN) 1420 return -ESRCH; 1421 1422 read_lock(&tasklist_lock); 1423 if (pid != -1) { 1424 ret = __kill_pgrp_info(sig, info, 1425 pid ? find_vpid(-pid) : task_pgrp(current)); 1426 } else { 1427 int retval = 0, count = 0; 1428 struct task_struct * p; 1429 1430 for_each_process(p) { 1431 if (task_pid_vnr(p) > 1 && 1432 !same_thread_group(p, current)) { 1433 int err = group_send_sig_info(sig, info, p); 1434 ++count; 1435 if (err != -EPERM) 1436 retval = err; 1437 } 1438 } 1439 ret = count ? retval : -ESRCH; 1440 } 1441 read_unlock(&tasklist_lock); 1442 1443 return ret; 1444 } 1445 1446 /* 1447 * These are for backward compatibility with the rest of the kernel source. 1448 */ 1449 1450 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1451 { 1452 /* 1453 * Make sure legacy kernel users don't send in bad values 1454 * (normal paths check this in check_kill_permission). 1455 */ 1456 if (!valid_signal(sig)) 1457 return -EINVAL; 1458 1459 return do_send_sig_info(sig, info, p, false); 1460 } 1461 1462 #define __si_special(priv) \ 1463 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) 1464 1465 int 1466 send_sig(int sig, struct task_struct *p, int priv) 1467 { 1468 return send_sig_info(sig, __si_special(priv), p); 1469 } 1470 1471 void 1472 force_sig(int sig, struct task_struct *p) 1473 { 1474 force_sig_info(sig, SEND_SIG_PRIV, p); 1475 } 1476 1477 /* 1478 * When things go south during signal handling, we 1479 * will force a SIGSEGV. And if the signal that caused 1480 * the problem was already a SIGSEGV, we'll want to 1481 * make sure we don't even try to deliver the signal.. 
1482 */ 1483 int 1484 force_sigsegv(int sig, struct task_struct *p) 1485 { 1486 if (sig == SIGSEGV) { 1487 unsigned long flags; 1488 spin_lock_irqsave(&p->sighand->siglock, flags); 1489 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; 1490 spin_unlock_irqrestore(&p->sighand->siglock, flags); 1491 } 1492 force_sig(SIGSEGV, p); 1493 return 0; 1494 } 1495 1496 int force_sig_fault(int sig, int code, void __user *addr 1497 ___ARCH_SI_TRAPNO(int trapno) 1498 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) 1499 , struct task_struct *t) 1500 { 1501 struct siginfo info; 1502 1503 clear_siginfo(&info); 1504 info.si_signo = sig; 1505 info.si_errno = 0; 1506 info.si_code = code; 1507 info.si_addr = addr; 1508 #ifdef __ARCH_SI_TRAPNO 1509 info.si_trapno = trapno; 1510 #endif 1511 #ifdef __ia64__ 1512 info.si_imm = imm; 1513 info.si_flags = flags; 1514 info.si_isr = isr; 1515 #endif 1516 return force_sig_info(info.si_signo, &info, t); 1517 } 1518 1519 int send_sig_fault(int sig, int code, void __user *addr 1520 ___ARCH_SI_TRAPNO(int trapno) 1521 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) 1522 , struct task_struct *t) 1523 { 1524 struct siginfo info; 1525 1526 clear_siginfo(&info); 1527 info.si_signo = sig; 1528 info.si_errno = 0; 1529 info.si_code = code; 1530 info.si_addr = addr; 1531 #ifdef __ARCH_SI_TRAPNO 1532 info.si_trapno = trapno; 1533 #endif 1534 #ifdef __ia64__ 1535 info.si_imm = imm; 1536 info.si_flags = flags; 1537 info.si_isr = isr; 1538 #endif 1539 return send_sig_info(info.si_signo, &info, t); 1540 } 1541 1542 int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) 1543 { 1544 struct siginfo info; 1545 1546 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); 1547 clear_siginfo(&info); 1548 info.si_signo = SIGBUS; 1549 info.si_errno = 0; 1550 info.si_code = code; 1551 info.si_addr = addr; 1552 info.si_addr_lsb = lsb; 1553 return force_sig_info(info.si_signo, &info, t); 1554 } 1555 1556 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) 1557 { 1558 struct siginfo info; 1559 1560 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); 1561 clear_siginfo(&info); 1562 info.si_signo = SIGBUS; 1563 info.si_errno = 0; 1564 info.si_code = code; 1565 info.si_addr = addr; 1566 info.si_addr_lsb = lsb; 1567 return send_sig_info(info.si_signo, &info, t); 1568 } 1569 EXPORT_SYMBOL(send_sig_mceerr); 1570 1571 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper) 1572 { 1573 struct siginfo info; 1574 1575 clear_siginfo(&info); 1576 info.si_signo = SIGSEGV; 1577 info.si_errno = 0; 1578 info.si_code = SEGV_BNDERR; 1579 info.si_addr = addr; 1580 info.si_lower = lower; 1581 info.si_upper = upper; 1582 return force_sig_info(info.si_signo, &info, current); 1583 } 1584 1585 #ifdef SEGV_PKUERR 1586 int force_sig_pkuerr(void __user *addr, u32 pkey) 1587 { 1588 struct siginfo info; 1589 1590 clear_siginfo(&info); 1591 info.si_signo = SIGSEGV; 1592 info.si_errno = 0; 1593 info.si_code = SEGV_PKUERR; 1594 info.si_addr = addr; 1595 info.si_pkey = pkey; 1596 return force_sig_info(info.si_signo, &info, current); 1597 } 1598 #endif 1599 1600 /* For the crazy architectures that include trap information in 1601 * the errno field, instead of an actual errno value. 
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
        struct siginfo info;

        clear_siginfo(&info);
        info.si_signo = SIGTRAP;
        info.si_errno = errno;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = addr;
        return force_sig_info(info.si_signo, &info, current);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
        int ret;

        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, __si_special(priv), pid);
        read_unlock(&tasklist_lock);

        return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
        return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

        if (q)
                q->flags |= SIGQUEUE_PREALLOC;

        return q;
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        spinlock_t *lock = &current->sighand->siglock;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * We must hold ->siglock while testing q->list
         * to serialize with collect_signal() or with
         * __exit_signal()->flush_sigqueue().
         */
        spin_lock_irqsave(lock, flags);
        q->flags &= ~SIGQUEUE_PREALLOC;
        /*
         * If it is queued it will be freed when dequeued,
         * like the "regular" sigqueue.
         */
        if (!list_empty(&q->list))
                q = NULL;
        spin_unlock_irqrestore(lock, flags);

        if (q)
                __sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
        int ret, result;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        ret = -1;
        if (!likely(lock_task_sighand(t, &flags)))
                goto ret;

        ret = 1; /* the signal is ignored */
        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t, false))
                goto out;

        ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                result = TRACE_SIGNAL_ALREADY_PENDING;
                goto out;
        }
        q->info.si_overrun = 0;

        signalfd_notify(t, sig);
        pending = group ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
        result = TRACE_SIGNAL_DELIVERED;
out:
        trace_signal_generate(sig, &q->info, t, group, result);
        unlock_task_sighand(t, &flags);
ret:
        return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
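 * Called with tasklist_lock held (see the comment about ->parent below).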
1727 */ 1728 bool do_notify_parent(struct task_struct *tsk, int sig) 1729 { 1730 struct siginfo info; 1731 unsigned long flags; 1732 struct sighand_struct *psig; 1733 bool autoreap = false; 1734 u64 utime, stime; 1735 1736 BUG_ON(sig == -1); 1737 1738 /* do_notify_parent_cldstop should have been called instead. */ 1739 BUG_ON(task_is_stopped_or_traced(tsk)); 1740 1741 BUG_ON(!tsk->ptrace && 1742 (tsk->group_leader != tsk || !thread_group_empty(tsk))); 1743 1744 if (sig != SIGCHLD) { 1745 /* 1746 * This is only possible if parent == real_parent. 1747 * Check if it has changed security domain. 1748 */ 1749 if (tsk->parent_exec_id != tsk->parent->self_exec_id) 1750 sig = SIGCHLD; 1751 } 1752 1753 clear_siginfo(&info); 1754 info.si_signo = sig; 1755 info.si_errno = 0; 1756 /* 1757 * We are under tasklist_lock here so our parent is tied to 1758 * us and cannot change. 1759 * 1760 * task_active_pid_ns will always return the same pid namespace 1761 * until a task passes through release_task. 1762 * 1763 * write_lock() currently calls preempt_disable() which is the 1764 * same as rcu_read_lock(), but according to Oleg, this is not 1765 * correct to rely on this 1766 */ 1767 rcu_read_lock(); 1768 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); 1769 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), 1770 task_uid(tsk)); 1771 rcu_read_unlock(); 1772 1773 task_cputime(tsk, &utime, &stime); 1774 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime); 1775 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime); 1776 1777 info.si_status = tsk->exit_code & 0x7f; 1778 if (tsk->exit_code & 0x80) 1779 info.si_code = CLD_DUMPED; 1780 else if (tsk->exit_code & 0x7f) 1781 info.si_code = CLD_KILLED; 1782 else { 1783 info.si_code = CLD_EXITED; 1784 info.si_status = tsk->exit_code >> 8; 1785 } 1786 1787 psig = tsk->parent->sighand; 1788 spin_lock_irqsave(&psig->siglock, flags); 1789 if (!tsk->ptrace && sig == SIGCHLD && 1790 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || 1791 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { 1792 /* 1793 * We are exiting and our parent doesn't care. POSIX.1 1794 * defines special semantics for setting SIGCHLD to SIG_IGN 1795 * or setting the SA_NOCLDWAIT flag: we should be reaped 1796 * automatically and not left for our parent's wait4 call. 1797 * Rather than having the parent do it as a magic kind of 1798 * signal handler, we just set this to tell do_exit that we 1799 * can be cleaned up without becoming a zombie. Note that 1800 * we still call __wake_up_parent in this case, because a 1801 * blocked sys_wait4 might now return -ECHILD. 1802 * 1803 * Whether we send SIGCHLD or not for SA_NOCLDWAIT 1804 * is implementation-defined: we do (if you don't want 1805 * it, just use SIG_IGN instead). 1806 */ 1807 autoreap = true; 1808 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1809 sig = 0; 1810 } 1811 if (valid_signal(sig) && sig) 1812 __group_send_sig_info(sig, &info, tsk->parent); 1813 __wake_up_parent(tsk, tsk->parent); 1814 spin_unlock_irqrestore(&psig->siglock, flags); 1815 1816 return autoreap; 1817 } 1818 1819 /** 1820 * do_notify_parent_cldstop - notify parent of stopped/continued state change 1821 * @tsk: task reporting the state change 1822 * @for_ptracer: the notification is for ptracer 1823 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report 1824 * 1825 * Notify @tsk's parent that the stopped/continued state has changed. If 1826 * @for_ptracer is %false, @tsk's group leader notifies to its real parent. 
1827 * If %true, @tsk reports to @tsk->parent which should be the ptracer. 1828 * 1829 * CONTEXT: 1830 * Must be called with tasklist_lock at least read locked. 1831 */ 1832 static void do_notify_parent_cldstop(struct task_struct *tsk, 1833 bool for_ptracer, int why) 1834 { 1835 struct siginfo info; 1836 unsigned long flags; 1837 struct task_struct *parent; 1838 struct sighand_struct *sighand; 1839 u64 utime, stime; 1840 1841 if (for_ptracer) { 1842 parent = tsk->parent; 1843 } else { 1844 tsk = tsk->group_leader; 1845 parent = tsk->real_parent; 1846 } 1847 1848 clear_siginfo(&info); 1849 info.si_signo = SIGCHLD; 1850 info.si_errno = 0; 1851 /* 1852 * see comment in do_notify_parent() about the following 4 lines 1853 */ 1854 rcu_read_lock(); 1855 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); 1856 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); 1857 rcu_read_unlock(); 1858 1859 task_cputime(tsk, &utime, &stime); 1860 info.si_utime = nsec_to_clock_t(utime); 1861 info.si_stime = nsec_to_clock_t(stime); 1862 1863 info.si_code = why; 1864 switch (why) { 1865 case CLD_CONTINUED: 1866 info.si_status = SIGCONT; 1867 break; 1868 case CLD_STOPPED: 1869 info.si_status = tsk->signal->group_exit_code & 0x7f; 1870 break; 1871 case CLD_TRAPPED: 1872 info.si_status = tsk->exit_code & 0x7f; 1873 break; 1874 default: 1875 BUG(); 1876 } 1877 1878 sighand = parent->sighand; 1879 spin_lock_irqsave(&sighand->siglock, flags); 1880 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && 1881 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) 1882 __group_send_sig_info(SIGCHLD, &info, parent); 1883 /* 1884 * Even if SIGCHLD is not generated, we must wake up wait4 calls. 1885 */ 1886 __wake_up_parent(tsk, parent); 1887 spin_unlock_irqrestore(&sighand->siglock, flags); 1888 } 1889 1890 static inline int may_ptrace_stop(void) 1891 { 1892 if (!likely(current->ptrace)) 1893 return 0; 1894 /* 1895 * Are we in the middle of do_coredump? 1896 * If so and our tracer is also part of the coredump stopping 1897 * is a deadlock situation, and pointless because our tracer 1898 * is dead so don't allow us to stop. 1899 * If SIGKILL was already sent before the caller unlocked 1900 * ->siglock we must see ->core_state != NULL. Otherwise it 1901 * is safe to enter schedule(). 1902 * 1903 * This is almost outdated, a task with the pending SIGKILL can't 1904 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported 1905 * after SIGKILL was already dequeued. 1906 */ 1907 if (unlikely(current->mm->core_state) && 1908 unlikely(current->mm == current->parent->mm)) 1909 return 0; 1910 1911 return 1; 1912 } 1913 1914 /* 1915 * Return non-zero if there is a SIGKILL that should be waking us up. 1916 * Called with the siglock held. 1917 */ 1918 static int sigkill_pending(struct task_struct *tsk) 1919 { 1920 return sigismember(&tsk->pending.signal, SIGKILL) || 1921 sigismember(&tsk->signal->shared_pending.signal, SIGKILL); 1922 } 1923 1924 /* 1925 * This must be called with current->sighand->siglock held. 1926 * 1927 * This should be the path for all ptrace stops. 1928 * We always set current->last_siginfo while stopped here. 1929 * That makes it a way to test a stopped process for 1930 * being ptrace-stopped vs being job-control-stopped. 1931 * 1932 * If we actually decide not to stop at all because the tracer 1933 * is gone, we keep current->exit_code unless clear_code. 
1934 */ 1935 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) 1936 __releases(¤t->sighand->siglock) 1937 __acquires(¤t->sighand->siglock) 1938 { 1939 bool gstop_done = false; 1940 1941 if (arch_ptrace_stop_needed(exit_code, info)) { 1942 /* 1943 * The arch code has something special to do before a 1944 * ptrace stop. This is allowed to block, e.g. for faults 1945 * on user stack pages. We can't keep the siglock while 1946 * calling arch_ptrace_stop, so we must release it now. 1947 * To preserve proper semantics, we must do this before 1948 * any signal bookkeeping like checking group_stop_count. 1949 * Meanwhile, a SIGKILL could come in before we retake the 1950 * siglock. That must prevent us from sleeping in TASK_TRACED. 1951 * So after regaining the lock, we must check for SIGKILL. 1952 */ 1953 spin_unlock_irq(¤t->sighand->siglock); 1954 arch_ptrace_stop(exit_code, info); 1955 spin_lock_irq(¤t->sighand->siglock); 1956 if (sigkill_pending(current)) 1957 return; 1958 } 1959 1960 set_special_state(TASK_TRACED); 1961 1962 /* 1963 * We're committing to trapping. TRACED should be visible before 1964 * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). 1965 * Also, transition to TRACED and updates to ->jobctl should be 1966 * atomic with respect to siglock and should be done after the arch 1967 * hook as siglock is released and regrabbed across it. 1968 * 1969 * TRACER TRACEE 1970 * 1971 * ptrace_attach() 1972 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) 1973 * do_wait() 1974 * set_current_state() smp_wmb(); 1975 * ptrace_do_wait() 1976 * wait_task_stopped() 1977 * task_stopped_code() 1978 * [L] task_is_traced() [S] task_clear_jobctl_trapping(); 1979 */ 1980 smp_wmb(); 1981 1982 current->last_siginfo = info; 1983 current->exit_code = exit_code; 1984 1985 /* 1986 * If @why is CLD_STOPPED, we're trapping to participate in a group 1987 * stop. Do the bookkeeping. Note that if SIGCONT was delievered 1988 * across siglock relocks since INTERRUPT was scheduled, PENDING 1989 * could be clear now. We act as if SIGCONT is received after 1990 * TASK_TRACED is entered - ignore it. 1991 */ 1992 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) 1993 gstop_done = task_participate_group_stop(current); 1994 1995 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ 1996 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); 1997 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) 1998 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); 1999 2000 /* entering a trap, clear TRAPPING */ 2001 task_clear_jobctl_trapping(current); 2002 2003 spin_unlock_irq(¤t->sighand->siglock); 2004 read_lock(&tasklist_lock); 2005 if (may_ptrace_stop()) { 2006 /* 2007 * Notify parents of the stop. 2008 * 2009 * While ptraced, there are two parents - the ptracer and 2010 * the real_parent of the group_leader. The ptracer should 2011 * know about every stop while the real parent is only 2012 * interested in the completion of group stop. The states 2013 * for the two don't interact with each other. Notify 2014 * separately unless they're gonna be duplicates. 2015 */ 2016 do_notify_parent_cldstop(current, true, why); 2017 if (gstop_done && ptrace_reparented(current)) 2018 do_notify_parent_cldstop(current, false, why); 2019 2020 /* 2021 * Don't want to allow preemption here, because 2022 * sys_ptrace() needs this task to be inactive. 2023 * 2024 * XXX: implement read_unlock_no_resched(). 
2025 */ 2026 preempt_disable(); 2027 read_unlock(&tasklist_lock); 2028 preempt_enable_no_resched(); 2029 freezable_schedule(); 2030 } else { 2031 /* 2032 * By the time we got the lock, our tracer went away. 2033 * Don't drop the lock yet, another tracer may come. 2034 * 2035 * If @gstop_done, the ptracer went away between group stop 2036 * completion and here. During detach, it would have set 2037 * JOBCTL_STOP_PENDING on us and we'll re-enter 2038 * TASK_STOPPED in do_signal_stop() on return, so notifying 2039 * the real parent of the group stop completion is enough. 2040 */ 2041 if (gstop_done) 2042 do_notify_parent_cldstop(current, false, why); 2043 2044 /* tasklist protects us from ptrace_freeze_traced() */ 2045 __set_current_state(TASK_RUNNING); 2046 if (clear_code) 2047 current->exit_code = 0; 2048 read_unlock(&tasklist_lock); 2049 } 2050 2051 /* 2052 * We are back. Now reacquire the siglock before touching 2053 * last_siginfo, so that we are sure to have synchronized with 2054 * any signal-sending on another CPU that wants to examine it. 2055 */ 2056 spin_lock_irq(¤t->sighand->siglock); 2057 current->last_siginfo = NULL; 2058 2059 /* LISTENING can be set only during STOP traps, clear it */ 2060 current->jobctl &= ~JOBCTL_LISTENING; 2061 2062 /* 2063 * Queued signals ignored us while we were stopped for tracing. 2064 * So check for any that we should take before resuming user mode. 2065 * This sets TIF_SIGPENDING, but never clears it. 2066 */ 2067 recalc_sigpending_tsk(current); 2068 } 2069 2070 static void ptrace_do_notify(int signr, int exit_code, int why) 2071 { 2072 siginfo_t info; 2073 2074 clear_siginfo(&info); 2075 info.si_signo = signr; 2076 info.si_code = exit_code; 2077 info.si_pid = task_pid_vnr(current); 2078 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 2079 2080 /* Let the debugger run. */ 2081 ptrace_stop(exit_code, why, 1, &info); 2082 } 2083 2084 void ptrace_notify(int exit_code) 2085 { 2086 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); 2087 if (unlikely(current->task_works)) 2088 task_work_run(); 2089 2090 spin_lock_irq(¤t->sighand->siglock); 2091 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); 2092 spin_unlock_irq(¤t->sighand->siglock); 2093 } 2094 2095 /** 2096 * do_signal_stop - handle group stop for SIGSTOP and other stop signals 2097 * @signr: signr causing group stop if initiating 2098 * 2099 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr 2100 * and participate in it. If already set, participate in the existing 2101 * group stop. If participated in a group stop (and thus slept), %true is 2102 * returned with siglock released. 2103 * 2104 * If ptraced, this function doesn't handle stop itself. Instead, 2105 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock 2106 * untouched. The caller must ensure that INTERRUPT trap handling takes 2107 * places afterwards. 2108 * 2109 * CONTEXT: 2110 * Must be called with @current->sighand->siglock held, which is released 2111 * on %true return. 2112 * 2113 * RETURNS: 2114 * %false if group stop is already cancelled or ptrace trap is scheduled. 2115 * %true if participated in group stop. 
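 *
 * The expected calling pattern, as in get_signal() below, is roughly:
 *
 *	if ((current->jobctl & JOBCTL_STOP_PENDING) && do_signal_stop(0))
 *		goto relock;	/* siglock was dropped, take it again */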
2116 */ 2117 static bool do_signal_stop(int signr) 2118 __releases(¤t->sighand->siglock) 2119 { 2120 struct signal_struct *sig = current->signal; 2121 2122 if (!(current->jobctl & JOBCTL_STOP_PENDING)) { 2123 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; 2124 struct task_struct *t; 2125 2126 /* signr will be recorded in task->jobctl for retries */ 2127 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); 2128 2129 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || 2130 unlikely(signal_group_exit(sig))) 2131 return false; 2132 /* 2133 * There is no group stop already in progress. We must 2134 * initiate one now. 2135 * 2136 * While ptraced, a task may be resumed while group stop is 2137 * still in effect and then receive a stop signal and 2138 * initiate another group stop. This deviates from the 2139 * usual behavior as two consecutive stop signals can't 2140 * cause two group stops when !ptraced. That is why we 2141 * also check !task_is_stopped(t) below. 2142 * 2143 * The condition can be distinguished by testing whether 2144 * SIGNAL_STOP_STOPPED is already set. Don't generate 2145 * group_exit_code in such case. 2146 * 2147 * This is not necessary for SIGNAL_STOP_CONTINUED because 2148 * an intervening stop signal is required to cause two 2149 * continued events regardless of ptrace. 2150 */ 2151 if (!(sig->flags & SIGNAL_STOP_STOPPED)) 2152 sig->group_exit_code = signr; 2153 2154 sig->group_stop_count = 0; 2155 2156 if (task_set_jobctl_pending(current, signr | gstop)) 2157 sig->group_stop_count++; 2158 2159 t = current; 2160 while_each_thread(current, t) { 2161 /* 2162 * Setting state to TASK_STOPPED for a group 2163 * stop is always done with the siglock held, 2164 * so this check has no races. 2165 */ 2166 if (!task_is_stopped(t) && 2167 task_set_jobctl_pending(t, signr | gstop)) { 2168 sig->group_stop_count++; 2169 if (likely(!(t->ptrace & PT_SEIZED))) 2170 signal_wake_up(t, 0); 2171 else 2172 ptrace_trap_notify(t); 2173 } 2174 } 2175 } 2176 2177 if (likely(!current->ptrace)) { 2178 int notify = 0; 2179 2180 /* 2181 * If there are no other threads in the group, or if there 2182 * is a group stop in progress and we are the last to stop, 2183 * report to the parent. 2184 */ 2185 if (task_participate_group_stop(current)) 2186 notify = CLD_STOPPED; 2187 2188 set_special_state(TASK_STOPPED); 2189 spin_unlock_irq(¤t->sighand->siglock); 2190 2191 /* 2192 * Notify the parent of the group stop completion. Because 2193 * we're not holding either the siglock or tasklist_lock 2194 * here, ptracer may attach inbetween; however, this is for 2195 * group stop and should always be delivered to the real 2196 * parent of the group leader. The new ptracer will get 2197 * its notification when this task transitions into 2198 * TASK_TRACED. 2199 */ 2200 if (notify) { 2201 read_lock(&tasklist_lock); 2202 do_notify_parent_cldstop(current, false, notify); 2203 read_unlock(&tasklist_lock); 2204 } 2205 2206 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 2207 freezable_schedule(); 2208 return true; 2209 } else { 2210 /* 2211 * While ptraced, group stop is handled by STOP trap. 2212 * Schedule it and let the caller deal with it. 2213 */ 2214 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); 2215 return false; 2216 } 2217 } 2218 2219 /** 2220 * do_jobctl_trap - take care of ptrace jobctl traps 2221 * 2222 * When PT_SEIZED, it's used for both group stop and explicit 2223 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with 2224 * accompanying siginfo. 
If stopped, lower eight bits of exit_code contain 2225 * the stop signal; otherwise, %SIGTRAP. 2226 * 2227 * When !PT_SEIZED, it's used only for group stop trap with stop signal 2228 * number as exit_code and no siginfo. 2229 * 2230 * CONTEXT: 2231 * Must be called with @current->sighand->siglock held, which may be 2232 * released and re-acquired before returning with intervening sleep. 2233 */ 2234 static void do_jobctl_trap(void) 2235 { 2236 struct signal_struct *signal = current->signal; 2237 int signr = current->jobctl & JOBCTL_STOP_SIGMASK; 2238 2239 if (current->ptrace & PT_SEIZED) { 2240 if (!signal->group_stop_count && 2241 !(signal->flags & SIGNAL_STOP_STOPPED)) 2242 signr = SIGTRAP; 2243 WARN_ON_ONCE(!signr); 2244 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), 2245 CLD_STOPPED); 2246 } else { 2247 WARN_ON_ONCE(!signr); 2248 ptrace_stop(signr, CLD_STOPPED, 0, NULL); 2249 current->exit_code = 0; 2250 } 2251 } 2252 2253 static int ptrace_signal(int signr, siginfo_t *info) 2254 { 2255 /* 2256 * We do not check sig_kernel_stop(signr) but set this marker 2257 * unconditionally because we do not know whether debugger will 2258 * change signr. This flag has no meaning unless we are going 2259 * to stop after return from ptrace_stop(). In this case it will 2260 * be checked in do_signal_stop(), we should only stop if it was 2261 * not cleared by SIGCONT while we were sleeping. See also the 2262 * comment in dequeue_signal(). 2263 */ 2264 current->jobctl |= JOBCTL_STOP_DEQUEUED; 2265 ptrace_stop(signr, CLD_TRAPPED, 0, info); 2266 2267 /* We're back. Did the debugger cancel the sig? */ 2268 signr = current->exit_code; 2269 if (signr == 0) 2270 return signr; 2271 2272 current->exit_code = 0; 2273 2274 /* 2275 * Update the siginfo structure if the signal has 2276 * changed. If the debugger wanted something 2277 * specific in the siginfo structure then it should 2278 * have updated *info via PTRACE_SETSIGINFO. 2279 */ 2280 if (signr != info->si_signo) { 2281 clear_siginfo(info); 2282 info->si_signo = signr; 2283 info->si_errno = 0; 2284 info->si_code = SI_USER; 2285 rcu_read_lock(); 2286 info->si_pid = task_pid_vnr(current->parent); 2287 info->si_uid = from_kuid_munged(current_user_ns(), 2288 task_uid(current->parent)); 2289 rcu_read_unlock(); 2290 } 2291 2292 /* If the (new) signal is now blocked, requeue it. */ 2293 if (sigismember(¤t->blocked, signr)) { 2294 specific_send_sig_info(signr, info, current); 2295 signr = 0; 2296 } 2297 2298 return signr; 2299 } 2300 2301 int get_signal(struct ksignal *ksig) 2302 { 2303 struct sighand_struct *sighand = current->sighand; 2304 struct signal_struct *signal = current->signal; 2305 int signr; 2306 2307 if (unlikely(current->task_works)) 2308 task_work_run(); 2309 2310 if (unlikely(uprobe_deny_signal())) 2311 return 0; 2312 2313 /* 2314 * Do this once, we can't return to user-mode if freezing() == T. 2315 * do_signal_stop() and ptrace_stop() do freezable_schedule() and 2316 * thus do not need another check after return. 2317 */ 2318 try_to_freeze(); 2319 2320 relock: 2321 spin_lock_irq(&sighand->siglock); 2322 /* 2323 * Every stopped thread goes here after wakeup. Check to see if 2324 * we should notify the parent, prepare_signal(SIGCONT) encodes 2325 * the CLD_ si_code into SIGNAL_CLD_MASK bits. 
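 * (SIGNAL_CLD_MASK covers SIGNAL_CLD_STOPPED and SIGNAL_CLD_CONTINUED;
 * the test below maps them back to CLD_STOPPED/CLD_CONTINUED before
 * calling do_notify_parent_cldstop().)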
2326 */ 2327 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { 2328 int why; 2329 2330 if (signal->flags & SIGNAL_CLD_CONTINUED) 2331 why = CLD_CONTINUED; 2332 else 2333 why = CLD_STOPPED; 2334 2335 signal->flags &= ~SIGNAL_CLD_MASK; 2336 2337 spin_unlock_irq(&sighand->siglock); 2338 2339 /* 2340 * Notify the parent that we're continuing. This event is 2341 * always per-process and doesn't make whole lot of sense 2342 * for ptracers, who shouldn't consume the state via 2343 * wait(2) either, but, for backward compatibility, notify 2344 * the ptracer of the group leader too unless it's gonna be 2345 * a duplicate. 2346 */ 2347 read_lock(&tasklist_lock); 2348 do_notify_parent_cldstop(current, false, why); 2349 2350 if (ptrace_reparented(current->group_leader)) 2351 do_notify_parent_cldstop(current->group_leader, 2352 true, why); 2353 read_unlock(&tasklist_lock); 2354 2355 goto relock; 2356 } 2357 2358 for (;;) { 2359 struct k_sigaction *ka; 2360 2361 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && 2362 do_signal_stop(0)) 2363 goto relock; 2364 2365 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { 2366 do_jobctl_trap(); 2367 spin_unlock_irq(&sighand->siglock); 2368 goto relock; 2369 } 2370 2371 signr = dequeue_signal(current, ¤t->blocked, &ksig->info); 2372 2373 if (!signr) 2374 break; /* will return 0 */ 2375 2376 if (unlikely(current->ptrace) && signr != SIGKILL) { 2377 signr = ptrace_signal(signr, &ksig->info); 2378 if (!signr) 2379 continue; 2380 } 2381 2382 ka = &sighand->action[signr-1]; 2383 2384 /* Trace actually delivered signals. */ 2385 trace_signal_deliver(signr, &ksig->info, ka); 2386 2387 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 2388 continue; 2389 if (ka->sa.sa_handler != SIG_DFL) { 2390 /* Run the handler. */ 2391 ksig->ka = *ka; 2392 2393 if (ka->sa.sa_flags & SA_ONESHOT) 2394 ka->sa.sa_handler = SIG_DFL; 2395 2396 break; /* will return non-zero "signr" value */ 2397 } 2398 2399 /* 2400 * Now we are doing the default action for this signal. 2401 */ 2402 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 2403 continue; 2404 2405 /* 2406 * Global init gets no signals it doesn't want. 2407 * Container-init gets no signals it doesn't want from same 2408 * container. 2409 * 2410 * Note that if global/container-init sees a sig_kernel_only() 2411 * signal here, the signal must have been generated internally 2412 * or must have come from an ancestor namespace. In either 2413 * case, the signal cannot be dropped. 2414 */ 2415 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && 2416 !sig_kernel_only(signr)) 2417 continue; 2418 2419 if (sig_kernel_stop(signr)) { 2420 /* 2421 * The default action is to stop all threads in 2422 * the thread group. The job control signals 2423 * do nothing in an orphaned pgrp, but SIGSTOP 2424 * always works. Note that siglock needs to be 2425 * dropped during the call to is_orphaned_pgrp() 2426 * because of lock ordering with tasklist_lock. 2427 * This allows an intervening SIGCONT to be posted. 2428 * We need to check for that and bail out if necessary. 2429 */ 2430 if (signr != SIGSTOP) { 2431 spin_unlock_irq(&sighand->siglock); 2432 2433 /* signals can be posted during this window */ 2434 2435 if (is_current_pgrp_orphaned()) 2436 goto relock; 2437 2438 spin_lock_irq(&sighand->siglock); 2439 } 2440 2441 if (likely(do_signal_stop(ksig->info.si_signo))) { 2442 /* It released the siglock. */ 2443 goto relock; 2444 } 2445 2446 /* 2447 * We didn't actually stop, due to a race 2448 * with SIGCONT or something like that. 
2449 */ 2450 continue; 2451 } 2452 2453 spin_unlock_irq(&sighand->siglock); 2454 2455 /* 2456 * Anything else is fatal, maybe with a core dump. 2457 */ 2458 current->flags |= PF_SIGNALED; 2459 2460 if (sig_kernel_coredump(signr)) { 2461 if (print_fatal_signals) 2462 print_fatal_signal(ksig->info.si_signo); 2463 proc_coredump_connector(current); 2464 /* 2465 * If it was able to dump core, this kills all 2466 * other threads in the group and synchronizes with 2467 * their demise. If we lost the race with another 2468 * thread getting here, it set group_exit_code 2469 * first and our do_group_exit call below will use 2470 * that value and ignore the one we pass it. 2471 */ 2472 do_coredump(&ksig->info); 2473 } 2474 2475 /* 2476 * Death signals, no core dump. 2477 */ 2478 do_group_exit(ksig->info.si_signo); 2479 /* NOTREACHED */ 2480 } 2481 spin_unlock_irq(&sighand->siglock); 2482 2483 ksig->sig = signr; 2484 return ksig->sig > 0; 2485 } 2486 2487 /** 2488 * signal_delivered - 2489 * @ksig: kernel signal struct 2490 * @stepping: nonzero if debugger single-step or block-step in use 2491 * 2492 * This function should be called when a signal has successfully been 2493 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask 2494 * is always blocked, and the signal itself is blocked unless %SA_NODEFER 2495 * is set in @ksig->ka.sa.sa_flags. Tracing is notified. 2496 */ 2497 static void signal_delivered(struct ksignal *ksig, int stepping) 2498 { 2499 sigset_t blocked; 2500 2501 /* A signal was successfully delivered, and the 2502 saved sigmask was stored on the signal frame, 2503 and will be restored by sigreturn. So we can 2504 simply clear the restore sigmask flag. */ 2505 clear_restore_sigmask(); 2506 2507 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); 2508 if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) 2509 sigaddset(&blocked, ksig->sig); 2510 set_current_blocked(&blocked); 2511 tracehook_signal_handler(stepping); 2512 } 2513 2514 void signal_setup_done(int failed, struct ksignal *ksig, int stepping) 2515 { 2516 if (failed) 2517 force_sigsegv(ksig->sig, current); 2518 else 2519 signal_delivered(ksig, stepping); 2520 } 2521 2522 /* 2523 * It could be that complete_signal() picked us to notify about the 2524 * group-wide signal. Other threads should be notified now to take 2525 * the shared signals in @which since we will not. 2526 */ 2527 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) 2528 { 2529 sigset_t retarget; 2530 struct task_struct *t; 2531 2532 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); 2533 if (sigisemptyset(&retarget)) 2534 return; 2535 2536 t = tsk; 2537 while_each_thread(tsk, t) { 2538 if (t->flags & PF_EXITING) 2539 continue; 2540 2541 if (!has_pending_signals(&retarget, &t->blocked)) 2542 continue; 2543 /* Remove the signals this thread can handle. */ 2544 sigandsets(&retarget, &retarget, &t->blocked); 2545 2546 if (!signal_pending(t)) 2547 signal_wake_up(t, 0); 2548 2549 if (sigisemptyset(&retarget)) 2550 break; 2551 } 2552 } 2553 2554 void exit_signals(struct task_struct *tsk) 2555 { 2556 int group_stop = 0; 2557 sigset_t unblocked; 2558 2559 /* 2560 * @tsk is about to have PF_EXITING set - lock out users which 2561 * expect stable threadgroup. 
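 * (cgroup_threadgroup_change_begin() below takes the threadgroup
 * rwsem for this; cgroup migration, for instance, relies on it to
 * see a stable set of threads.)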
2562 */ 2563 cgroup_threadgroup_change_begin(tsk); 2564 2565 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { 2566 tsk->flags |= PF_EXITING; 2567 cgroup_threadgroup_change_end(tsk); 2568 return; 2569 } 2570 2571 spin_lock_irq(&tsk->sighand->siglock); 2572 /* 2573 * From now this task is not visible for group-wide signals, 2574 * see wants_signal(), do_signal_stop(). 2575 */ 2576 tsk->flags |= PF_EXITING; 2577 2578 cgroup_threadgroup_change_end(tsk); 2579 2580 if (!signal_pending(tsk)) 2581 goto out; 2582 2583 unblocked = tsk->blocked; 2584 signotset(&unblocked); 2585 retarget_shared_pending(tsk, &unblocked); 2586 2587 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && 2588 task_participate_group_stop(tsk)) 2589 group_stop = CLD_STOPPED; 2590 out: 2591 spin_unlock_irq(&tsk->sighand->siglock); 2592 2593 /* 2594 * If group stop has completed, deliver the notification. This 2595 * should always go to the real parent of the group leader. 2596 */ 2597 if (unlikely(group_stop)) { 2598 read_lock(&tasklist_lock); 2599 do_notify_parent_cldstop(tsk, false, group_stop); 2600 read_unlock(&tasklist_lock); 2601 } 2602 } 2603 2604 EXPORT_SYMBOL(recalc_sigpending); 2605 EXPORT_SYMBOL_GPL(dequeue_signal); 2606 EXPORT_SYMBOL(flush_signals); 2607 EXPORT_SYMBOL(force_sig); 2608 EXPORT_SYMBOL(send_sig); 2609 EXPORT_SYMBOL(send_sig_info); 2610 EXPORT_SYMBOL(sigprocmask); 2611 2612 /* 2613 * System call entry points. 2614 */ 2615 2616 /** 2617 * sys_restart_syscall - restart a system call 2618 */ 2619 SYSCALL_DEFINE0(restart_syscall) 2620 { 2621 struct restart_block *restart = ¤t->restart_block; 2622 return restart->fn(restart); 2623 } 2624 2625 long do_no_restart_syscall(struct restart_block *param) 2626 { 2627 return -EINTR; 2628 } 2629 2630 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) 2631 { 2632 if (signal_pending(tsk) && !thread_group_empty(tsk)) { 2633 sigset_t newblocked; 2634 /* A set of now blocked but previously unblocked signals. */ 2635 sigandnsets(&newblocked, newset, ¤t->blocked); 2636 retarget_shared_pending(tsk, &newblocked); 2637 } 2638 tsk->blocked = *newset; 2639 recalc_sigpending(); 2640 } 2641 2642 /** 2643 * set_current_blocked - change current->blocked mask 2644 * @newset: new mask 2645 * 2646 * It is wrong to change ->blocked directly, this helper should be used 2647 * to ensure the process can't miss a shared signal we are going to block. 2648 */ 2649 void set_current_blocked(sigset_t *newset) 2650 { 2651 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); 2652 __set_current_blocked(newset); 2653 } 2654 2655 void __set_current_blocked(const sigset_t *newset) 2656 { 2657 struct task_struct *tsk = current; 2658 2659 /* 2660 * In case the signal mask hasn't changed, there is nothing we need 2661 * to do. The current->blocked shouldn't be modified by other task. 2662 */ 2663 if (sigequalsets(&tsk->blocked, newset)) 2664 return; 2665 2666 spin_lock_irq(&tsk->sighand->siglock); 2667 __set_task_blocked(tsk, newset); 2668 spin_unlock_irq(&tsk->sighand->siglock); 2669 } 2670 2671 /* 2672 * This is also useful for kernel threads that want to temporarily 2673 * (or permanently) block certain signals. 2674 * 2675 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel 2676 * interface happily blocks "unblockable" signals like SIGKILL 2677 * and friends. 
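 *
 * A minimal sketch of such a kernel-thread use (note that, as said
 * above, this blocks SIGKILL as well):
 *
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	sigprocmask(SIG_BLOCK, &mask, NULL);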
2678 */ 2679 int sigprocmask(int how, sigset_t *set, sigset_t *oldset) 2680 { 2681 struct task_struct *tsk = current; 2682 sigset_t newset; 2683 2684 /* Lockless, only current can change ->blocked, never from irq */ 2685 if (oldset) 2686 *oldset = tsk->blocked; 2687 2688 switch (how) { 2689 case SIG_BLOCK: 2690 sigorsets(&newset, &tsk->blocked, set); 2691 break; 2692 case SIG_UNBLOCK: 2693 sigandnsets(&newset, &tsk->blocked, set); 2694 break; 2695 case SIG_SETMASK: 2696 newset = *set; 2697 break; 2698 default: 2699 return -EINVAL; 2700 } 2701 2702 __set_current_blocked(&newset); 2703 return 0; 2704 } 2705 2706 /** 2707 * sys_rt_sigprocmask - change the list of currently blocked signals 2708 * @how: whether to add, remove, or set signals 2709 * @nset: stores pending signals 2710 * @oset: previous value of signal mask if non-null 2711 * @sigsetsize: size of sigset_t type 2712 */ 2713 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, 2714 sigset_t __user *, oset, size_t, sigsetsize) 2715 { 2716 sigset_t old_set, new_set; 2717 int error; 2718 2719 /* XXX: Don't preclude handling different sized sigset_t's. */ 2720 if (sigsetsize != sizeof(sigset_t)) 2721 return -EINVAL; 2722 2723 old_set = current->blocked; 2724 2725 if (nset) { 2726 if (copy_from_user(&new_set, nset, sizeof(sigset_t))) 2727 return -EFAULT; 2728 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2729 2730 error = sigprocmask(how, &new_set, NULL); 2731 if (error) 2732 return error; 2733 } 2734 2735 if (oset) { 2736 if (copy_to_user(oset, &old_set, sizeof(sigset_t))) 2737 return -EFAULT; 2738 } 2739 2740 return 0; 2741 } 2742 2743 #ifdef CONFIG_COMPAT 2744 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, 2745 compat_sigset_t __user *, oset, compat_size_t, sigsetsize) 2746 { 2747 sigset_t old_set = current->blocked; 2748 2749 /* XXX: Don't preclude handling different sized sigset_t's. */ 2750 if (sigsetsize != sizeof(sigset_t)) 2751 return -EINVAL; 2752 2753 if (nset) { 2754 sigset_t new_set; 2755 int error; 2756 if (get_compat_sigset(&new_set, nset)) 2757 return -EFAULT; 2758 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2759 2760 error = sigprocmask(how, &new_set, NULL); 2761 if (error) 2762 return error; 2763 } 2764 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0; 2765 } 2766 #endif 2767 2768 static int do_sigpending(sigset_t *set) 2769 { 2770 spin_lock_irq(¤t->sighand->siglock); 2771 sigorsets(set, ¤t->pending.signal, 2772 ¤t->signal->shared_pending.signal); 2773 spin_unlock_irq(¤t->sighand->siglock); 2774 2775 /* Outside the lock because only this thread touches it. 
*/ 2776 sigandsets(set, ¤t->blocked, set); 2777 return 0; 2778 } 2779 2780 /** 2781 * sys_rt_sigpending - examine a pending signal that has been raised 2782 * while blocked 2783 * @uset: stores pending signals 2784 * @sigsetsize: size of sigset_t type or larger 2785 */ 2786 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) 2787 { 2788 sigset_t set; 2789 int err; 2790 2791 if (sigsetsize > sizeof(*uset)) 2792 return -EINVAL; 2793 2794 err = do_sigpending(&set); 2795 if (!err && copy_to_user(uset, &set, sigsetsize)) 2796 err = -EFAULT; 2797 return err; 2798 } 2799 2800 #ifdef CONFIG_COMPAT 2801 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, 2802 compat_size_t, sigsetsize) 2803 { 2804 sigset_t set; 2805 int err; 2806 2807 if (sigsetsize > sizeof(*uset)) 2808 return -EINVAL; 2809 2810 err = do_sigpending(&set); 2811 if (!err) 2812 err = put_compat_sigset(uset, &set, sigsetsize); 2813 return err; 2814 } 2815 #endif 2816 2817 enum siginfo_layout siginfo_layout(int sig, int si_code) 2818 { 2819 enum siginfo_layout layout = SIL_KILL; 2820 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) { 2821 static const struct { 2822 unsigned char limit, layout; 2823 } filter[] = { 2824 [SIGILL] = { NSIGILL, SIL_FAULT }, 2825 [SIGFPE] = { NSIGFPE, SIL_FAULT }, 2826 [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, 2827 [SIGBUS] = { NSIGBUS, SIL_FAULT }, 2828 [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, 2829 #if defined(SIGEMT) && defined(NSIGEMT) 2830 [SIGEMT] = { NSIGEMT, SIL_FAULT }, 2831 #endif 2832 [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, 2833 [SIGPOLL] = { NSIGPOLL, SIL_POLL }, 2834 [SIGSYS] = { NSIGSYS, SIL_SYS }, 2835 }; 2836 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) { 2837 layout = filter[sig].layout; 2838 /* Handle the exceptions */ 2839 if ((sig == SIGBUS) && 2840 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO)) 2841 layout = SIL_FAULT_MCEERR; 2842 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR)) 2843 layout = SIL_FAULT_BNDERR; 2844 #ifdef SEGV_PKUERR 2845 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR)) 2846 layout = SIL_FAULT_PKUERR; 2847 #endif 2848 } 2849 else if (si_code <= NSIGPOLL) 2850 layout = SIL_POLL; 2851 } else { 2852 if (si_code == SI_TIMER) 2853 layout = SIL_TIMER; 2854 else if (si_code == SI_SIGIO) 2855 layout = SIL_POLL; 2856 else if (si_code < 0) 2857 layout = SIL_RT; 2858 } 2859 return layout; 2860 } 2861 2862 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) 2863 { 2864 if (copy_to_user(to, from , sizeof(struct siginfo))) 2865 return -EFAULT; 2866 return 0; 2867 } 2868 2869 #ifdef CONFIG_COMPAT 2870 int copy_siginfo_to_user32(struct compat_siginfo __user *to, 2871 const struct siginfo *from) 2872 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) 2873 { 2874 return __copy_siginfo_to_user32(to, from, in_x32_syscall()); 2875 } 2876 int __copy_siginfo_to_user32(struct compat_siginfo __user *to, 2877 const struct siginfo *from, bool x32_ABI) 2878 #endif 2879 { 2880 struct compat_siginfo new; 2881 memset(&new, 0, sizeof(new)); 2882 2883 new.si_signo = from->si_signo; 2884 new.si_errno = from->si_errno; 2885 new.si_code = from->si_code; 2886 switch(siginfo_layout(from->si_signo, from->si_code)) { 2887 case SIL_KILL: 2888 new.si_pid = from->si_pid; 2889 new.si_uid = from->si_uid; 2890 break; 2891 case SIL_TIMER: 2892 new.si_tid = from->si_tid; 2893 new.si_overrun = from->si_overrun; 2894 new.si_int = from->si_int; 2895 break; 2896 case SIL_POLL: 2897 new.si_band = from->si_band; 2898 
new.si_fd = from->si_fd; 2899 break; 2900 case SIL_FAULT: 2901 new.si_addr = ptr_to_compat(from->si_addr); 2902 #ifdef __ARCH_SI_TRAPNO 2903 new.si_trapno = from->si_trapno; 2904 #endif 2905 break; 2906 case SIL_FAULT_MCEERR: 2907 new.si_addr = ptr_to_compat(from->si_addr); 2908 #ifdef __ARCH_SI_TRAPNO 2909 new.si_trapno = from->si_trapno; 2910 #endif 2911 new.si_addr_lsb = from->si_addr_lsb; 2912 break; 2913 case SIL_FAULT_BNDERR: 2914 new.si_addr = ptr_to_compat(from->si_addr); 2915 #ifdef __ARCH_SI_TRAPNO 2916 new.si_trapno = from->si_trapno; 2917 #endif 2918 new.si_lower = ptr_to_compat(from->si_lower); 2919 new.si_upper = ptr_to_compat(from->si_upper); 2920 break; 2921 case SIL_FAULT_PKUERR: 2922 new.si_addr = ptr_to_compat(from->si_addr); 2923 #ifdef __ARCH_SI_TRAPNO 2924 new.si_trapno = from->si_trapno; 2925 #endif 2926 new.si_pkey = from->si_pkey; 2927 break; 2928 case SIL_CHLD: 2929 new.si_pid = from->si_pid; 2930 new.si_uid = from->si_uid; 2931 new.si_status = from->si_status; 2932 #ifdef CONFIG_X86_X32_ABI 2933 if (x32_ABI) { 2934 new._sifields._sigchld_x32._utime = from->si_utime; 2935 new._sifields._sigchld_x32._stime = from->si_stime; 2936 } else 2937 #endif 2938 { 2939 new.si_utime = from->si_utime; 2940 new.si_stime = from->si_stime; 2941 } 2942 break; 2943 case SIL_RT: 2944 new.si_pid = from->si_pid; 2945 new.si_uid = from->si_uid; 2946 new.si_int = from->si_int; 2947 break; 2948 case SIL_SYS: 2949 new.si_call_addr = ptr_to_compat(from->si_call_addr); 2950 new.si_syscall = from->si_syscall; 2951 new.si_arch = from->si_arch; 2952 break; 2953 } 2954 2955 if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) 2956 return -EFAULT; 2957 2958 return 0; 2959 } 2960 2961 int copy_siginfo_from_user32(struct siginfo *to, 2962 const struct compat_siginfo __user *ufrom) 2963 { 2964 struct compat_siginfo from; 2965 2966 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) 2967 return -EFAULT; 2968 2969 clear_siginfo(to); 2970 to->si_signo = from.si_signo; 2971 to->si_errno = from.si_errno; 2972 to->si_code = from.si_code; 2973 switch(siginfo_layout(from.si_signo, from.si_code)) { 2974 case SIL_KILL: 2975 to->si_pid = from.si_pid; 2976 to->si_uid = from.si_uid; 2977 break; 2978 case SIL_TIMER: 2979 to->si_tid = from.si_tid; 2980 to->si_overrun = from.si_overrun; 2981 to->si_int = from.si_int; 2982 break; 2983 case SIL_POLL: 2984 to->si_band = from.si_band; 2985 to->si_fd = from.si_fd; 2986 break; 2987 case SIL_FAULT: 2988 to->si_addr = compat_ptr(from.si_addr); 2989 #ifdef __ARCH_SI_TRAPNO 2990 to->si_trapno = from.si_trapno; 2991 #endif 2992 break; 2993 case SIL_FAULT_MCEERR: 2994 to->si_addr = compat_ptr(from.si_addr); 2995 #ifdef __ARCH_SI_TRAPNO 2996 to->si_trapno = from.si_trapno; 2997 #endif 2998 to->si_addr_lsb = from.si_addr_lsb; 2999 break; 3000 case SIL_FAULT_BNDERR: 3001 to->si_addr = compat_ptr(from.si_addr); 3002 #ifdef __ARCH_SI_TRAPNO 3003 to->si_trapno = from.si_trapno; 3004 #endif 3005 to->si_lower = compat_ptr(from.si_lower); 3006 to->si_upper = compat_ptr(from.si_upper); 3007 break; 3008 case SIL_FAULT_PKUERR: 3009 to->si_addr = compat_ptr(from.si_addr); 3010 #ifdef __ARCH_SI_TRAPNO 3011 to->si_trapno = from.si_trapno; 3012 #endif 3013 to->si_pkey = from.si_pkey; 3014 break; 3015 case SIL_CHLD: 3016 to->si_pid = from.si_pid; 3017 to->si_uid = from.si_uid; 3018 to->si_status = from.si_status; 3019 #ifdef CONFIG_X86_X32_ABI 3020 if (in_x32_syscall()) { 3021 to->si_utime = from._sifields._sigchld_x32._utime; 3022 to->si_stime = 
from._sifields._sigchld_x32._stime; 3023 } else 3024 #endif 3025 { 3026 to->si_utime = from.si_utime; 3027 to->si_stime = from.si_stime; 3028 } 3029 break; 3030 case SIL_RT: 3031 to->si_pid = from.si_pid; 3032 to->si_uid = from.si_uid; 3033 to->si_int = from.si_int; 3034 break; 3035 case SIL_SYS: 3036 to->si_call_addr = compat_ptr(from.si_call_addr); 3037 to->si_syscall = from.si_syscall; 3038 to->si_arch = from.si_arch; 3039 break; 3040 } 3041 return 0; 3042 } 3043 #endif /* CONFIG_COMPAT */ 3044 3045 /** 3046 * do_sigtimedwait - wait for queued signals specified in @which 3047 * @which: queued signals to wait for 3048 * @info: if non-null, the signal's siginfo is returned here 3049 * @ts: upper bound on process time suspension 3050 */ 3051 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info, 3052 const struct timespec *ts) 3053 { 3054 ktime_t *to = NULL, timeout = KTIME_MAX; 3055 struct task_struct *tsk = current; 3056 sigset_t mask = *which; 3057 int sig, ret = 0; 3058 3059 if (ts) { 3060 if (!timespec_valid(ts)) 3061 return -EINVAL; 3062 timeout = timespec_to_ktime(*ts); 3063 to = &timeout; 3064 } 3065 3066 /* 3067 * Invert the set of allowed signals to get those we want to block. 3068 */ 3069 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); 3070 signotset(&mask); 3071 3072 spin_lock_irq(&tsk->sighand->siglock); 3073 sig = dequeue_signal(tsk, &mask, info); 3074 if (!sig && timeout) { 3075 /* 3076 * None ready, temporarily unblock those we're interested 3077 * while we are sleeping in so that we'll be awakened when 3078 * they arrive. Unblocking is always fine, we can avoid 3079 * set_current_blocked(). 3080 */ 3081 tsk->real_blocked = tsk->blocked; 3082 sigandsets(&tsk->blocked, &tsk->blocked, &mask); 3083 recalc_sigpending(); 3084 spin_unlock_irq(&tsk->sighand->siglock); 3085 3086 __set_current_state(TASK_INTERRUPTIBLE); 3087 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, 3088 HRTIMER_MODE_REL); 3089 spin_lock_irq(&tsk->sighand->siglock); 3090 __set_task_blocked(tsk, &tsk->real_blocked); 3091 sigemptyset(&tsk->real_blocked); 3092 sig = dequeue_signal(tsk, &mask, info); 3093 } 3094 spin_unlock_irq(&tsk->sighand->siglock); 3095 3096 if (sig) 3097 return sig; 3098 return ret ? -EINTR : -EAGAIN; 3099 } 3100 3101 /** 3102 * sys_rt_sigtimedwait - synchronously wait for queued signals specified 3103 * in @uthese 3104 * @uthese: queued signals to wait for 3105 * @uinfo: if non-null, the signal's siginfo is returned here 3106 * @uts: upper bound on process time suspension 3107 * @sigsetsize: size of sigset_t type 3108 */ 3109 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, 3110 siginfo_t __user *, uinfo, const struct timespec __user *, uts, 3111 size_t, sigsetsize) 3112 { 3113 sigset_t these; 3114 struct timespec ts; 3115 siginfo_t info; 3116 int ret; 3117 3118 /* XXX: Don't preclude handling different sized sigset_t's. */ 3119 if (sigsetsize != sizeof(sigset_t)) 3120 return -EINVAL; 3121 3122 if (copy_from_user(&these, uthese, sizeof(these))) 3123 return -EFAULT; 3124 3125 if (uts) { 3126 if (copy_from_user(&ts, uts, sizeof(ts))) 3127 return -EFAULT; 3128 } 3129 3130 ret = do_sigtimedwait(&these, &info, uts ? 
&ts : NULL); 3131 3132 if (ret > 0 && uinfo) { 3133 if (copy_siginfo_to_user(uinfo, &info)) 3134 ret = -EFAULT; 3135 } 3136 3137 return ret; 3138 } 3139 3140 #ifdef CONFIG_COMPAT 3141 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, 3142 struct compat_siginfo __user *, uinfo, 3143 struct compat_timespec __user *, uts, compat_size_t, sigsetsize) 3144 { 3145 sigset_t s; 3146 struct timespec t; 3147 siginfo_t info; 3148 long ret; 3149 3150 if (sigsetsize != sizeof(sigset_t)) 3151 return -EINVAL; 3152 3153 if (get_compat_sigset(&s, uthese)) 3154 return -EFAULT; 3155 3156 if (uts) { 3157 if (compat_get_timespec(&t, uts)) 3158 return -EFAULT; 3159 } 3160 3161 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); 3162 3163 if (ret > 0 && uinfo) { 3164 if (copy_siginfo_to_user32(uinfo, &info)) 3165 ret = -EFAULT; 3166 } 3167 3168 return ret; 3169 } 3170 #endif 3171 3172 /** 3173 * sys_kill - send a signal to a process 3174 * @pid: the PID of the process 3175 * @sig: signal to be sent 3176 */ 3177 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) 3178 { 3179 struct siginfo info; 3180 3181 clear_siginfo(&info); 3182 info.si_signo = sig; 3183 info.si_errno = 0; 3184 info.si_code = SI_USER; 3185 info.si_pid = task_tgid_vnr(current); 3186 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 3187 3188 return kill_something_info(sig, &info, pid); 3189 } 3190 3191 static int 3192 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) 3193 { 3194 struct task_struct *p; 3195 int error = -ESRCH; 3196 3197 rcu_read_lock(); 3198 p = find_task_by_vpid(pid); 3199 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { 3200 error = check_kill_permission(sig, info, p); 3201 /* 3202 * The null signal is a permissions and process existence 3203 * probe. No signal is actually delivered. 3204 */ 3205 if (!error && sig) { 3206 error = do_send_sig_info(sig, info, p, false); 3207 /* 3208 * If lock_task_sighand() failed we pretend the task 3209 * dies after receiving the signal. The window is tiny, 3210 * and the signal is private anyway. 3211 */ 3212 if (unlikely(error == -ESRCH)) 3213 error = 0; 3214 } 3215 } 3216 rcu_read_unlock(); 3217 3218 return error; 3219 } 3220 3221 static int do_tkill(pid_t tgid, pid_t pid, int sig) 3222 { 3223 struct siginfo info; 3224 3225 clear_siginfo(&info); 3226 info.si_signo = sig; 3227 info.si_errno = 0; 3228 info.si_code = SI_TKILL; 3229 info.si_pid = task_tgid_vnr(current); 3230 info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); 3231 3232 return do_send_specific(tgid, pid, sig, &info); 3233 } 3234 3235 /** 3236 * sys_tgkill - send signal to one specific thread 3237 * @tgid: the thread group ID of the thread 3238 * @pid: the PID of the thread 3239 * @sig: signal to be sent 3240 * 3241 * This syscall also checks the @tgid and returns -ESRCH even if the PID 3242 * exists but it's not belonging to the target process anymore. This 3243 * method solves the problem of threads exiting and PIDs getting reused. 3244 */ 3245 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) 3246 { 3247 /* This is only valid for single tasks */ 3248 if (pid <= 0 || tgid <= 0) 3249 return -EINVAL; 3250 3251 return do_tkill(tgid, pid, sig); 3252 } 3253 3254 /** 3255 * sys_tkill - send signal to one specific task 3256 * @pid: the PID of the task 3257 * @sig: signal to be sent 3258 * 3259 * Send a signal to only one task, even if it's a CLONE_THREAD task. 
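 *
 * Unlike sys_tgkill() above there is no thread group check, so if the
 * PID has been recycled the signal may go to an unrelated task; the
 * call is kept for backwards compatibility.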
3260 */ 3261 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) 3262 { 3263 /* This is only valid for single tasks */ 3264 if (pid <= 0) 3265 return -EINVAL; 3266 3267 return do_tkill(0, pid, sig); 3268 } 3269 3270 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) 3271 { 3272 /* Not even root can pretend to send signals from the kernel. 3273 * Nor can they impersonate a kill()/tgkill(), which adds source info. 3274 */ 3275 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 3276 (task_pid_vnr(current) != pid)) 3277 return -EPERM; 3278 3279 info->si_signo = sig; 3280 3281 /* POSIX.1b doesn't mention process groups. */ 3282 return kill_proc_info(sig, info, pid); 3283 } 3284 3285 /** 3286 * sys_rt_sigqueueinfo - send signal information to a signal 3287 * @pid: the PID of the thread 3288 * @sig: signal to be sent 3289 * @uinfo: signal info to be sent 3290 */ 3291 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, 3292 siginfo_t __user *, uinfo) 3293 { 3294 siginfo_t info; 3295 if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) 3296 return -EFAULT; 3297 return do_rt_sigqueueinfo(pid, sig, &info); 3298 } 3299 3300 #ifdef CONFIG_COMPAT 3301 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, 3302 compat_pid_t, pid, 3303 int, sig, 3304 struct compat_siginfo __user *, uinfo) 3305 { 3306 siginfo_t info; 3307 int ret = copy_siginfo_from_user32(&info, uinfo); 3308 if (unlikely(ret)) 3309 return ret; 3310 return do_rt_sigqueueinfo(pid, sig, &info); 3311 } 3312 #endif 3313 3314 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) 3315 { 3316 /* This is only valid for single tasks */ 3317 if (pid <= 0 || tgid <= 0) 3318 return -EINVAL; 3319 3320 /* Not even root can pretend to send signals from the kernel. 3321 * Nor can they impersonate a kill()/tgkill(), which adds source info. 
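 *
 * A user-space caller therefore passes a negative si_code; glibc's
 * sigqueue(3)/pthread_sigqueue(3) wrappers, for example, fill in
 * roughly:
 *
 *	info.si_code  = SI_QUEUE;
 *	info.si_pid   = getpid();
 *	info.si_uid   = getuid();
 *	info.si_value = value;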
3322 */ 3323 if ((info->si_code >= 0 || info->si_code == SI_TKILL) && 3324 (task_pid_vnr(current) != pid)) 3325 return -EPERM; 3326 3327 info->si_signo = sig; 3328 3329 return do_send_specific(tgid, pid, sig, info); 3330 } 3331 3332 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, 3333 siginfo_t __user *, uinfo) 3334 { 3335 siginfo_t info; 3336 3337 if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) 3338 return -EFAULT; 3339 3340 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 3341 } 3342 3343 #ifdef CONFIG_COMPAT 3344 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, 3345 compat_pid_t, tgid, 3346 compat_pid_t, pid, 3347 int, sig, 3348 struct compat_siginfo __user *, uinfo) 3349 { 3350 siginfo_t info; 3351 3352 if (copy_siginfo_from_user32(&info, uinfo)) 3353 return -EFAULT; 3354 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 3355 } 3356 #endif 3357 3358 /* 3359 * For kthreads only, must not be used if cloned with CLONE_SIGHAND 3360 */ 3361 void kernel_sigaction(int sig, __sighandler_t action) 3362 { 3363 spin_lock_irq(¤t->sighand->siglock); 3364 current->sighand->action[sig - 1].sa.sa_handler = action; 3365 if (action == SIG_IGN) { 3366 sigset_t mask; 3367 3368 sigemptyset(&mask); 3369 sigaddset(&mask, sig); 3370 3371 flush_sigqueue_mask(&mask, ¤t->signal->shared_pending); 3372 flush_sigqueue_mask(&mask, ¤t->pending); 3373 recalc_sigpending(); 3374 } 3375 spin_unlock_irq(¤t->sighand->siglock); 3376 } 3377 EXPORT_SYMBOL(kernel_sigaction); 3378 3379 void __weak sigaction_compat_abi(struct k_sigaction *act, 3380 struct k_sigaction *oact) 3381 { 3382 } 3383 3384 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) 3385 { 3386 struct task_struct *p = current, *t; 3387 struct k_sigaction *k; 3388 sigset_t mask; 3389 3390 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) 3391 return -EINVAL; 3392 3393 k = &p->sighand->action[sig-1]; 3394 3395 spin_lock_irq(&p->sighand->siglock); 3396 if (oact) 3397 *oact = *k; 3398 3399 sigaction_compat_abi(act, oact); 3400 3401 if (act) { 3402 sigdelsetmask(&act->sa.sa_mask, 3403 sigmask(SIGKILL) | sigmask(SIGSTOP)); 3404 *k = *act; 3405 /* 3406 * POSIX 3.3.1.3: 3407 * "Setting a signal action to SIG_IGN for a signal that is 3408 * pending shall cause the pending signal to be discarded, 3409 * whether or not it is blocked." 
3410 * 3411 * "Setting a signal action to SIG_DFL for a signal that is 3412 * pending and whose default action is to ignore the signal 3413 * (for example, SIGCHLD), shall cause the pending signal to 3414 * be discarded, whether or not it is blocked" 3415 */ 3416 if (sig_handler_ignored(sig_handler(p, sig), sig)) { 3417 sigemptyset(&mask); 3418 sigaddset(&mask, sig); 3419 flush_sigqueue_mask(&mask, &p->signal->shared_pending); 3420 for_each_thread(p, t) 3421 flush_sigqueue_mask(&mask, &t->pending); 3422 } 3423 } 3424 3425 spin_unlock_irq(&p->sighand->siglock); 3426 return 0; 3427 } 3428 3429 static int 3430 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp) 3431 { 3432 struct task_struct *t = current; 3433 3434 if (oss) { 3435 memset(oss, 0, sizeof(stack_t)); 3436 oss->ss_sp = (void __user *) t->sas_ss_sp; 3437 oss->ss_size = t->sas_ss_size; 3438 oss->ss_flags = sas_ss_flags(sp) | 3439 (current->sas_ss_flags & SS_FLAG_BITS); 3440 } 3441 3442 if (ss) { 3443 void __user *ss_sp = ss->ss_sp; 3444 size_t ss_size = ss->ss_size; 3445 unsigned ss_flags = ss->ss_flags; 3446 int ss_mode; 3447 3448 if (unlikely(on_sig_stack(sp))) 3449 return -EPERM; 3450 3451 ss_mode = ss_flags & ~SS_FLAG_BITS; 3452 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && 3453 ss_mode != 0)) 3454 return -EINVAL; 3455 3456 if (ss_mode == SS_DISABLE) { 3457 ss_size = 0; 3458 ss_sp = NULL; 3459 } else { 3460 if (unlikely(ss_size < MINSIGSTKSZ)) 3461 return -ENOMEM; 3462 } 3463 3464 t->sas_ss_sp = (unsigned long) ss_sp; 3465 t->sas_ss_size = ss_size; 3466 t->sas_ss_flags = ss_flags; 3467 } 3468 return 0; 3469 } 3470 3471 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) 3472 { 3473 stack_t new, old; 3474 int err; 3475 if (uss && copy_from_user(&new, uss, sizeof(stack_t))) 3476 return -EFAULT; 3477 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL, 3478 current_user_stack_pointer()); 3479 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) 3480 err = -EFAULT; 3481 return err; 3482 } 3483 3484 int restore_altstack(const stack_t __user *uss) 3485 { 3486 stack_t new; 3487 if (copy_from_user(&new, uss, sizeof(stack_t))) 3488 return -EFAULT; 3489 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer()); 3490 /* squash all but EFAULT for now */ 3491 return 0; 3492 } 3493 3494 int __save_altstack(stack_t __user *uss, unsigned long sp) 3495 { 3496 struct task_struct *t = current; 3497 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | 3498 __put_user(t->sas_ss_flags, &uss->ss_flags) | 3499 __put_user(t->sas_ss_size, &uss->ss_size); 3500 if (err) 3501 return err; 3502 if (t->sas_ss_flags & SS_AUTODISARM) 3503 sas_ss_reset(t); 3504 return 0; 3505 } 3506 3507 #ifdef CONFIG_COMPAT 3508 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, 3509 compat_stack_t __user *uoss_ptr) 3510 { 3511 stack_t uss, uoss; 3512 int ret; 3513 3514 if (uss_ptr) { 3515 compat_stack_t uss32; 3516 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) 3517 return -EFAULT; 3518 uss.ss_sp = compat_ptr(uss32.ss_sp); 3519 uss.ss_flags = uss32.ss_flags; 3520 uss.ss_size = uss32.ss_size; 3521 } 3522 ret = do_sigaltstack(uss_ptr ? 
&uss : NULL, &uoss, 3523 compat_user_stack_pointer()); 3524 if (ret >= 0 && uoss_ptr) { 3525 compat_stack_t old; 3526 memset(&old, 0, sizeof(old)); 3527 old.ss_sp = ptr_to_compat(uoss.ss_sp); 3528 old.ss_flags = uoss.ss_flags; 3529 old.ss_size = uoss.ss_size; 3530 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t))) 3531 ret = -EFAULT; 3532 } 3533 return ret; 3534 } 3535 3536 COMPAT_SYSCALL_DEFINE2(sigaltstack, 3537 const compat_stack_t __user *, uss_ptr, 3538 compat_stack_t __user *, uoss_ptr) 3539 { 3540 return do_compat_sigaltstack(uss_ptr, uoss_ptr); 3541 } 3542 3543 int compat_restore_altstack(const compat_stack_t __user *uss) 3544 { 3545 int err = do_compat_sigaltstack(uss, NULL); 3546 /* squash all but -EFAULT for now */ 3547 return err == -EFAULT ? err : 0; 3548 } 3549 3550 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) 3551 { 3552 int err; 3553 struct task_struct *t = current; 3554 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), 3555 &uss->ss_sp) | 3556 __put_user(t->sas_ss_flags, &uss->ss_flags) | 3557 __put_user(t->sas_ss_size, &uss->ss_size); 3558 if (err) 3559 return err; 3560 if (t->sas_ss_flags & SS_AUTODISARM) 3561 sas_ss_reset(t); 3562 return 0; 3563 } 3564 #endif 3565 3566 #ifdef __ARCH_WANT_SYS_SIGPENDING 3567 3568 /** 3569 * sys_sigpending - examine pending signals 3570 * @uset: where mask of pending signal is returned 3571 */ 3572 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset) 3573 { 3574 sigset_t set; 3575 int err; 3576 3577 if (sizeof(old_sigset_t) > sizeof(*uset)) 3578 return -EINVAL; 3579 3580 err = do_sigpending(&set); 3581 if (!err && copy_to_user(uset, &set, sizeof(old_sigset_t))) 3582 err = -EFAULT; 3583 return err; 3584 } 3585 3586 #ifdef CONFIG_COMPAT 3587 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32) 3588 { 3589 sigset_t set; 3590 int err = do_sigpending(&set); 3591 if (!err) 3592 err = put_user(set.sig[0], set32); 3593 return err; 3594 } 3595 #endif 3596 3597 #endif 3598 3599 #ifdef __ARCH_WANT_SYS_SIGPROCMASK 3600 /** 3601 * sys_sigprocmask - examine and change blocked signals 3602 * @how: whether to add, remove, or set signals 3603 * @nset: signals to add or remove (if non-null) 3604 * @oset: previous value of signal mask if non-null 3605 * 3606 * Some platforms have their own version with special arguments; 3607 * others support only sys_rt_sigprocmask. 
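 *
 * Note that this legacy interface works on old_sigset_t, i.e. only the
 * first word of the blocked mask (current->blocked.sig[0]); larger
 * masks need sys_rt_sigprocmask().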
3608 */ 3609 3610 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, 3611 old_sigset_t __user *, oset) 3612 { 3613 old_sigset_t old_set, new_set; 3614 sigset_t new_blocked; 3615 3616 old_set = current->blocked.sig[0]; 3617 3618 if (nset) { 3619 if (copy_from_user(&new_set, nset, sizeof(*nset))) 3620 return -EFAULT; 3621 3622 new_blocked = current->blocked; 3623 3624 switch (how) { 3625 case SIG_BLOCK: 3626 sigaddsetmask(&new_blocked, new_set); 3627 break; 3628 case SIG_UNBLOCK: 3629 sigdelsetmask(&new_blocked, new_set); 3630 break; 3631 case SIG_SETMASK: 3632 new_blocked.sig[0] = new_set; 3633 break; 3634 default: 3635 return -EINVAL; 3636 } 3637 3638 set_current_blocked(&new_blocked); 3639 } 3640 3641 if (oset) { 3642 if (copy_to_user(oset, &old_set, sizeof(*oset))) 3643 return -EFAULT; 3644 } 3645 3646 return 0; 3647 } 3648 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ 3649 3650 #ifndef CONFIG_ODD_RT_SIGACTION 3651 /** 3652 * sys_rt_sigaction - alter an action taken by a process 3653 * @sig: signal to be sent 3654 * @act: new sigaction 3655 * @oact: used to save the previous sigaction 3656 * @sigsetsize: size of sigset_t type 3657 */ 3658 SYSCALL_DEFINE4(rt_sigaction, int, sig, 3659 const struct sigaction __user *, act, 3660 struct sigaction __user *, oact, 3661 size_t, sigsetsize) 3662 { 3663 struct k_sigaction new_sa, old_sa; 3664 int ret = -EINVAL; 3665 3666 /* XXX: Don't preclude handling different sized sigset_t's. */ 3667 if (sigsetsize != sizeof(sigset_t)) 3668 goto out; 3669 3670 if (act) { 3671 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) 3672 return -EFAULT; 3673 } 3674 3675 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); 3676 3677 if (!ret && oact) { 3678 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) 3679 return -EFAULT; 3680 } 3681 out: 3682 return ret; 3683 } 3684 #ifdef CONFIG_COMPAT 3685 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, 3686 const struct compat_sigaction __user *, act, 3687 struct compat_sigaction __user *, oact, 3688 compat_size_t, sigsetsize) 3689 { 3690 struct k_sigaction new_ka, old_ka; 3691 #ifdef __ARCH_HAS_SA_RESTORER 3692 compat_uptr_t restorer; 3693 #endif 3694 int ret; 3695 3696 /* XXX: Don't preclude handling different sized sigset_t's. */ 3697 if (sigsetsize != sizeof(compat_sigset_t)) 3698 return -EINVAL; 3699 3700 if (act) { 3701 compat_uptr_t handler; 3702 ret = get_user(handler, &act->sa_handler); 3703 new_ka.sa.sa_handler = compat_ptr(handler); 3704 #ifdef __ARCH_HAS_SA_RESTORER 3705 ret |= get_user(restorer, &act->sa_restorer); 3706 new_ka.sa.sa_restorer = compat_ptr(restorer); 3707 #endif 3708 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask); 3709 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); 3710 if (ret) 3711 return -EFAULT; 3712 } 3713 3714 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); 3715 if (!ret && oact) { 3716 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), 3717 &oact->sa_handler); 3718 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask, 3719 sizeof(oact->sa_mask)); 3720 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); 3721 #ifdef __ARCH_HAS_SA_RESTORER 3722 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), 3723 &oact->sa_restorer); 3724 #endif 3725 } 3726 return ret; 3727 } 3728 #endif 3729 #endif /* !CONFIG_ODD_RT_SIGACTION */ 3730 3731 #ifdef CONFIG_OLD_SIGACTION 3732 SYSCALL_DEFINE3(sigaction, int, sig, 3733 const struct old_sigaction __user *, act, 3734 struct old_sigaction __user *, oact) 3735 { 3736 struct k_sigaction new_ka, old_ka; 3737 int ret; 3738 3739 if (act) { 3740 old_sigset_t mask; 3741 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 3742 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 3743 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || 3744 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 3745 __get_user(mask, &act->sa_mask)) 3746 return -EFAULT; 3747 #ifdef __ARCH_HAS_KA_RESTORER 3748 new_ka.ka_restorer = NULL; 3749 #endif 3750 siginitset(&new_ka.sa.sa_mask, mask); 3751 } 3752 3753 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 3754 3755 if (!ret && oact) { 3756 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 3757 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 3758 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || 3759 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 3760 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 3761 return -EFAULT; 3762 } 3763 3764 return ret; 3765 } 3766 #endif 3767 #ifdef CONFIG_COMPAT_OLD_SIGACTION 3768 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig, 3769 const struct compat_old_sigaction __user *, act, 3770 struct compat_old_sigaction __user *, oact) 3771 { 3772 struct k_sigaction new_ka, old_ka; 3773 int ret; 3774 compat_old_sigset_t mask; 3775 compat_uptr_t handler, restorer; 3776 3777 if (act) { 3778 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 3779 __get_user(handler, &act->sa_handler) || 3780 __get_user(restorer, &act->sa_restorer) || 3781 __get_user(new_ka.sa.sa_flags, &act->sa_flags) || 3782 __get_user(mask, &act->sa_mask)) 3783 return -EFAULT; 3784 3785 #ifdef __ARCH_HAS_KA_RESTORER 3786 new_ka.ka_restorer = NULL; 3787 #endif 3788 new_ka.sa.sa_handler = compat_ptr(handler); 3789 new_ka.sa.sa_restorer = compat_ptr(restorer); 3790 siginitset(&new_ka.sa.sa_mask, mask); 3791 } 3792 3793 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 3794 3795 if (!ret && oact) { 3796 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 3797 __put_user(ptr_to_compat(old_ka.sa.sa_handler), 3798 &oact->sa_handler) || 3799 __put_user(ptr_to_compat(old_ka.sa.sa_restorer), 3800 &oact->sa_restorer) || 3801 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || 3802 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) 3803 return -EFAULT; 3804 } 3805 return ret; 3806 } 3807 #endif 3808 3809 #ifdef CONFIG_SGETMASK_SYSCALL 3810 3811 /* 3812 * For backwards compatibility. Functionality superseded by sigprocmask. 
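 * Both calls see only the first word of the blocked set: sgetmask()
 * returns current->blocked.sig[0], and ssetmask() rebuilds the mask
 * with siginitset(), which clears any further words.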
3813 */ 3814 SYSCALL_DEFINE0(sgetmask) 3815 { 3816 /* SMP safe */ 3817 return current->blocked.sig[0]; 3818 } 3819 3820 SYSCALL_DEFINE1(ssetmask, int, newmask) 3821 { 3822 int old = current->blocked.sig[0]; 3823 sigset_t newset; 3824 3825 siginitset(&newset, newmask); 3826 set_current_blocked(&newset); 3827 3828 return old; 3829 } 3830 #endif /* CONFIG_SGETMASK_SYSCALL */ 3831 3832 #ifdef __ARCH_WANT_SYS_SIGNAL 3833 /* 3834 * For backwards compatibility. Functionality superseded by sigaction. 3835 */ 3836 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) 3837 { 3838 struct k_sigaction new_sa, old_sa; 3839 int ret; 3840 3841 new_sa.sa.sa_handler = handler; 3842 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; 3843 sigemptyset(&new_sa.sa.sa_mask); 3844 3845 ret = do_sigaction(sig, &new_sa, &old_sa); 3846 3847 return ret ? ret : (unsigned long)old_sa.sa.sa_handler; 3848 } 3849 #endif /* __ARCH_WANT_SYS_SIGNAL */ 3850 3851 #ifdef __ARCH_WANT_SYS_PAUSE 3852 3853 SYSCALL_DEFINE0(pause) 3854 { 3855 while (!signal_pending(current)) { 3856 __set_current_state(TASK_INTERRUPTIBLE); 3857 schedule(); 3858 } 3859 return -ERESTARTNOHAND; 3860 } 3861 3862 #endif 3863 3864 static int sigsuspend(sigset_t *set) 3865 { 3866 current->saved_sigmask = current->blocked; 3867 set_current_blocked(set); 3868 3869 while (!signal_pending(current)) { 3870 __set_current_state(TASK_INTERRUPTIBLE); 3871 schedule(); 3872 } 3873 set_restore_sigmask(); 3874 return -ERESTARTNOHAND; 3875 } 3876 3877 /** 3878 * sys_rt_sigsuspend - replace the signal mask for a value with the 3879 * @unewset value until a signal is received 3880 * @unewset: new signal mask value 3881 * @sigsetsize: size of sigset_t type 3882 */ 3883 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) 3884 { 3885 sigset_t newset; 3886 3887 /* XXX: Don't preclude handling different sized sigset_t's. */ 3888 if (sigsetsize != sizeof(sigset_t)) 3889 return -EINVAL; 3890 3891 if (copy_from_user(&newset, unewset, sizeof(newset))) 3892 return -EFAULT; 3893 return sigsuspend(&newset); 3894 } 3895 3896 #ifdef CONFIG_COMPAT 3897 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) 3898 { 3899 sigset_t newset; 3900 3901 /* XXX: Don't preclude handling different sized sigset_t's. */ 3902 if (sigsetsize != sizeof(sigset_t)) 3903 return -EINVAL; 3904 3905 if (get_compat_sigset(&newset, unewset)) 3906 return -EFAULT; 3907 return sigsuspend(&newset); 3908 } 3909 #endif 3910 3911 #ifdef CONFIG_OLD_SIGSUSPEND 3912 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) 3913 { 3914 sigset_t blocked; 3915 siginitset(&blocked, mask); 3916 return sigsuspend(&blocked); 3917 } 3918 #endif 3919 #ifdef CONFIG_OLD_SIGSUSPEND3 3920 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) 3921 { 3922 sigset_t blocked; 3923 siginitset(&blocked, mask); 3924 return sigsuspend(&blocked); 3925 } 3926 #endif 3927 3928 __weak const char *arch_vma_name(struct vm_area_struct *vma) 3929 { 3930 return NULL; 3931 } 3932 3933 void __init signals_init(void) 3934 { 3935 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */ 3936 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE 3937 != offsetof(struct siginfo, _sifields._pad)); 3938 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); 3939 3940 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); 3941 } 3942 3943 #ifdef CONFIG_KGDB_KDB 3944 #include <linux/kdb.h> 3945 /* 3946 * kdb_send_sig - Allows kdb to send signals without exposing 3947 * signal internals. 
This function checks if the required locks are 3948 * available before calling the main signal code, to avoid kdb 3949 * deadlocks. 3950 */ 3951 void kdb_send_sig(struct task_struct *t, int sig) 3952 { 3953 static struct task_struct *kdb_prev_t; 3954 int new_t, ret; 3955 if (!spin_trylock(&t->sighand->siglock)) { 3956 kdb_printf("Can't do kill command now.\n" 3957 "The sigmask lock is held somewhere else in " 3958 "the kernel, try again later\n"); 3959 return; 3960 } 3961 new_t = kdb_prev_t != t; 3962 kdb_prev_t = t; 3963 if (t->state != TASK_RUNNING && new_t) { 3964 spin_unlock(&t->sighand->siglock); 3965 kdb_printf("Process is not RUNNING, sending a signal from " 3966 "kdb risks deadlock\n" 3967 "on the run queue locks. " 3968 "The signal has _not_ been sent.\n" 3969 "Reissue the kill command if you want to risk " 3970 "the deadlock.\n"); 3971 return; 3972 } 3973 ret = send_signal(sig, SEND_SIG_PRIV, t, false); 3974 spin_unlock(&t->sighand->siglock); 3975 if (ret) 3976 kdb_printf("Failed to deliver signal %d to process %d.\n", 3977 sig, t->pid); 3978 else 3979 kdb_printf("Signal %d was sent to process %d.\n", sig, t->pid); 3980 } 3981 #endif /* CONFIG_KGDB_KDB */ 3982