1 /* 2 * linux/kernel/signal.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 * 6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson 7 * 8 * 2003-06-02 Jim Houston - Concurrent Computer Corp. 9 * Changes to use preallocated sigqueue structures 10 * to allow signals to be sent reliably. 11 */ 12 13 #include <linux/slab.h> 14 #include <linux/module.h> 15 #include <linux/init.h> 16 #include <linux/sched.h> 17 #include <linux/fs.h> 18 #include <linux/tty.h> 19 #include <linux/binfmts.h> 20 #include <linux/security.h> 21 #include <linux/syscalls.h> 22 #include <linux/ptrace.h> 23 #include <linux/signal.h> 24 #include <linux/signalfd.h> 25 #include <linux/ratelimit.h> 26 #include <linux/tracehook.h> 27 #include <linux/capability.h> 28 #include <linux/freezer.h> 29 #include <linux/pid_namespace.h> 30 #include <linux/nsproxy.h> 31 #define CREATE_TRACE_POINTS 32 #include <trace/events/signal.h> 33 34 #include <asm/param.h> 35 #include <asm/uaccess.h> 36 #include <asm/unistd.h> 37 #include <asm/siginfo.h> 38 #include "audit.h" /* audit_signal_info() */ 39 40 /* 41 * SLAB caches for signal bits. 42 */ 43 44 static struct kmem_cache *sigqueue_cachep; 45 46 int print_fatal_signals __read_mostly; 47 48 static void __user *sig_handler(struct task_struct *t, int sig) 49 { 50 return t->sighand->action[sig - 1].sa.sa_handler; 51 } 52 53 static int sig_handler_ignored(void __user *handler, int sig) 54 { 55 /* Is it explicitly or implicitly ignored? */ 56 return handler == SIG_IGN || 57 (handler == SIG_DFL && sig_kernel_ignore(sig)); 58 } 59 60 static int sig_task_ignored(struct task_struct *t, int sig, 61 int from_ancestor_ns) 62 { 63 void __user *handler; 64 65 handler = sig_handler(t, sig); 66 67 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && 68 handler == SIG_DFL && !from_ancestor_ns) 69 return 1; 70 71 return sig_handler_ignored(handler, sig); 72 } 73 74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns) 75 { 76 /* 77 * Blocked signals are never ignored, since the 78 * signal handler may change by the time it is 79 * unblocked. 80 */ 81 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 82 return 0; 83 84 if (!sig_task_ignored(t, sig, from_ancestor_ns)) 85 return 0; 86 87 /* 88 * Tracers may want to know about even ignored signals. 89 */ 90 return !tracehook_consider_ignored_signal(t, sig); 91 } 92 93 /* 94 * Re-calculate pending state from the set of locally pending 95 * signals, globally pending signals, and blocked signals. 96 */ 97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) 98 { 99 unsigned long ready; 100 long i; 101 102 switch (_NSIG_WORDS) { 103 default: 104 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) 105 ready |= signal->sig[i] &~ blocked->sig[i]; 106 break; 107 108 case 4: ready = signal->sig[3] &~ blocked->sig[3]; 109 ready |= signal->sig[2] &~ blocked->sig[2]; 110 ready |= signal->sig[1] &~ blocked->sig[1]; 111 ready |= signal->sig[0] &~ blocked->sig[0]; 112 break; 113 114 case 2: ready = signal->sig[1] &~ blocked->sig[1]; 115 ready |= signal->sig[0] &~ blocked->sig[0]; 116 break; 117 118 case 1: ready = signal->sig[0] &~ blocked->sig[0]; 119 } 120 return ready != 0; 121 } 122 123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) 124 125 static int recalc_sigpending_tsk(struct task_struct *t) 126 { 127 if ((t->group_stop & GROUP_STOP_PENDING) || 128 PENDING(&t->pending, &t->blocked) || 129 PENDING(&t->signal->shared_pending, &t->blocked)) { 130 set_tsk_thread_flag(t, TIF_SIGPENDING); 131 return 1; 132 } 133 /* 134 * We must never clear the flag in another thread, or in current 135 * when it's possible the current syscall is returning -ERESTART*. 136 * So we don't clear it here, and only callers who know they should do. 137 */ 138 return 0; 139 } 140 141 /* 142 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. 143 * This is superfluous when called on current, the wakeup is a harmless no-op. 144 */ 145 void recalc_sigpending_and_wake(struct task_struct *t) 146 { 147 if (recalc_sigpending_tsk(t)) 148 signal_wake_up(t, 0); 149 } 150 151 void recalc_sigpending(void) 152 { 153 if (unlikely(tracehook_force_sigpending())) 154 set_thread_flag(TIF_SIGPENDING); 155 else if (!recalc_sigpending_tsk(current) && !freezing(current)) 156 clear_thread_flag(TIF_SIGPENDING); 157 158 } 159 160 /* Given the mask, find the first available signal that should be serviced. */ 161 162 #define SYNCHRONOUS_MASK \ 163 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ 164 sigmask(SIGTRAP) | sigmask(SIGFPE)) 165 166 int next_signal(struct sigpending *pending, sigset_t *mask) 167 { 168 unsigned long i, *s, *m, x; 169 int sig = 0; 170 171 s = pending->signal.sig; 172 m = mask->sig; 173 174 /* 175 * Handle the first word specially: it contains the 176 * synchronous signals that need to be dequeued first. 177 */ 178 x = *s &~ *m; 179 if (x) { 180 if (x & SYNCHRONOUS_MASK) 181 x &= SYNCHRONOUS_MASK; 182 sig = ffz(~x) + 1; 183 return sig; 184 } 185 186 switch (_NSIG_WORDS) { 187 default: 188 for (i = 1; i < _NSIG_WORDS; ++i) { 189 x = *++s &~ *++m; 190 if (!x) 191 continue; 192 sig = ffz(~x) + i*_NSIG_BPW + 1; 193 break; 194 } 195 break; 196 197 case 2: 198 x = s[1] &~ m[1]; 199 if (!x) 200 break; 201 sig = ffz(~x) + _NSIG_BPW + 1; 202 break; 203 204 case 1: 205 /* Nothing to do */ 206 break; 207 } 208 209 return sig; 210 } 211 212 static inline void print_dropped_signal(int sig) 213 { 214 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); 215 216 if (!print_fatal_signals) 217 return; 218 219 if (!__ratelimit(&ratelimit_state)) 220 return; 221 222 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", 223 current->comm, current->pid, sig); 224 } 225 226 /** 227 * task_clear_group_stop_trapping - clear group stop trapping bit 228 * @task: target task 229 * 230 * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it 231 * and wake up the ptracer. Note that we don't need any further locking. 232 * @task->siglock guarantees that @task->parent points to the ptracer. 233 * 234 * CONTEXT: 235 * Must be called with @task->sighand->siglock held. 236 */ 237 static void task_clear_group_stop_trapping(struct task_struct *task) 238 { 239 if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) { 240 task->group_stop &= ~GROUP_STOP_TRAPPING; 241 __wake_up_sync_key(&task->parent->signal->wait_chldexit, 242 TASK_UNINTERRUPTIBLE, 1, task); 243 } 244 } 245 246 /** 247 * task_clear_group_stop_pending - clear pending group stop 248 * @task: target task 249 * 250 * Clear group stop states for @task. 251 * 252 * CONTEXT: 253 * Must be called with @task->sighand->siglock held. 254 */ 255 void task_clear_group_stop_pending(struct task_struct *task) 256 { 257 task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME | 258 GROUP_STOP_DEQUEUED); 259 } 260 261 /** 262 * task_participate_group_stop - participate in a group stop 263 * @task: task participating in a group stop 264 * 265 * @task has GROUP_STOP_PENDING set and is participating in a group stop. 266 * Group stop states are cleared and the group stop count is consumed if 267 * %GROUP_STOP_CONSUME was set. If the consumption completes the group 268 * stop, the appropriate %SIGNAL_* flags are set. 269 * 270 * CONTEXT: 271 * Must be called with @task->sighand->siglock held. 272 * 273 * RETURNS: 274 * %true if group stop completion should be notified to the parent, %false 275 * otherwise. 276 */ 277 static bool task_participate_group_stop(struct task_struct *task) 278 { 279 struct signal_struct *sig = task->signal; 280 bool consume = task->group_stop & GROUP_STOP_CONSUME; 281 282 WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING)); 283 284 task_clear_group_stop_pending(task); 285 286 if (!consume) 287 return false; 288 289 if (!WARN_ON_ONCE(sig->group_stop_count == 0)) 290 sig->group_stop_count--; 291 292 /* 293 * Tell the caller to notify completion iff we are entering into a 294 * fresh group stop. Read comment in do_signal_stop() for details. 295 */ 296 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { 297 sig->flags = SIGNAL_STOP_STOPPED; 298 return true; 299 } 300 return false; 301 } 302 303 /* 304 * allocate a new signal queue record 305 * - this may be called without locks if and only if t == current, otherwise an 306 * appropriate lock must be held to stop the target task from exiting 307 */ 308 static struct sigqueue * 309 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) 310 { 311 struct sigqueue *q = NULL; 312 struct user_struct *user; 313 314 /* 315 * Protect access to @t credentials. This can go away when all 316 * callers hold rcu read lock. 317 */ 318 rcu_read_lock(); 319 user = get_uid(__task_cred(t)->user); 320 atomic_inc(&user->sigpending); 321 rcu_read_unlock(); 322 323 if (override_rlimit || 324 atomic_read(&user->sigpending) <= 325 task_rlimit(t, RLIMIT_SIGPENDING)) { 326 q = kmem_cache_alloc(sigqueue_cachep, flags); 327 } else { 328 print_dropped_signal(sig); 329 } 330 331 if (unlikely(q == NULL)) { 332 atomic_dec(&user->sigpending); 333 free_uid(user); 334 } else { 335 INIT_LIST_HEAD(&q->list); 336 q->flags = 0; 337 q->user = user; 338 } 339 340 return q; 341 } 342 343 static void __sigqueue_free(struct sigqueue *q) 344 { 345 if (q->flags & SIGQUEUE_PREALLOC) 346 return; 347 atomic_dec(&q->user->sigpending); 348 free_uid(q->user); 349 kmem_cache_free(sigqueue_cachep, q); 350 } 351 352 void flush_sigqueue(struct sigpending *queue) 353 { 354 struct sigqueue *q; 355 356 sigemptyset(&queue->signal); 357 while (!list_empty(&queue->list)) { 358 q = list_entry(queue->list.next, struct sigqueue , list); 359 list_del_init(&q->list); 360 __sigqueue_free(q); 361 } 362 } 363 364 /* 365 * Flush all pending signals for a task. 366 */ 367 void __flush_signals(struct task_struct *t) 368 { 369 clear_tsk_thread_flag(t, TIF_SIGPENDING); 370 flush_sigqueue(&t->pending); 371 flush_sigqueue(&t->signal->shared_pending); 372 } 373 374 void flush_signals(struct task_struct *t) 375 { 376 unsigned long flags; 377 378 spin_lock_irqsave(&t->sighand->siglock, flags); 379 __flush_signals(t); 380 spin_unlock_irqrestore(&t->sighand->siglock, flags); 381 } 382 383 static void __flush_itimer_signals(struct sigpending *pending) 384 { 385 sigset_t signal, retain; 386 struct sigqueue *q, *n; 387 388 signal = pending->signal; 389 sigemptyset(&retain); 390 391 list_for_each_entry_safe(q, n, &pending->list, list) { 392 int sig = q->info.si_signo; 393 394 if (likely(q->info.si_code != SI_TIMER)) { 395 sigaddset(&retain, sig); 396 } else { 397 sigdelset(&signal, sig); 398 list_del_init(&q->list); 399 __sigqueue_free(q); 400 } 401 } 402 403 sigorsets(&pending->signal, &signal, &retain); 404 } 405 406 void flush_itimer_signals(void) 407 { 408 struct task_struct *tsk = current; 409 unsigned long flags; 410 411 spin_lock_irqsave(&tsk->sighand->siglock, flags); 412 __flush_itimer_signals(&tsk->pending); 413 __flush_itimer_signals(&tsk->signal->shared_pending); 414 spin_unlock_irqrestore(&tsk->sighand->siglock, flags); 415 } 416 417 void ignore_signals(struct task_struct *t) 418 { 419 int i; 420 421 for (i = 0; i < _NSIG; ++i) 422 t->sighand->action[i].sa.sa_handler = SIG_IGN; 423 424 flush_signals(t); 425 } 426 427 /* 428 * Flush all handlers for a task. 429 */ 430 431 void 432 flush_signal_handlers(struct task_struct *t, int force_default) 433 { 434 int i; 435 struct k_sigaction *ka = &t->sighand->action[0]; 436 for (i = _NSIG ; i != 0 ; i--) { 437 if (force_default || ka->sa.sa_handler != SIG_IGN) 438 ka->sa.sa_handler = SIG_DFL; 439 ka->sa.sa_flags = 0; 440 sigemptyset(&ka->sa.sa_mask); 441 ka++; 442 } 443 } 444 445 int unhandled_signal(struct task_struct *tsk, int sig) 446 { 447 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; 448 if (is_global_init(tsk)) 449 return 1; 450 if (handler != SIG_IGN && handler != SIG_DFL) 451 return 0; 452 return !tracehook_consider_fatal_signal(tsk, sig); 453 } 454 455 /* 456 * Notify the system that a driver wants to block all signals for this 457 * process, and wants to be notified if any signals at all were to be 458 * sent/acted upon. If the notifier routine returns non-zero, then the 459 * signal will be acted upon after all. If the notifier routine returns 0, 460 * then then signal will be blocked. Only one block per process is 461 * allowed. priv is a pointer to private data that the notifier routine 462 * can use to determine if the signal should be blocked or not. 463 */ 464 void 465 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) 466 { 467 unsigned long flags; 468 469 spin_lock_irqsave(¤t->sighand->siglock, flags); 470 current->notifier_mask = mask; 471 current->notifier_data = priv; 472 current->notifier = notifier; 473 spin_unlock_irqrestore(¤t->sighand->siglock, flags); 474 } 475 476 /* Notify the system that blocking has ended. */ 477 478 void 479 unblock_all_signals(void) 480 { 481 unsigned long flags; 482 483 spin_lock_irqsave(¤t->sighand->siglock, flags); 484 current->notifier = NULL; 485 current->notifier_data = NULL; 486 recalc_sigpending(); 487 spin_unlock_irqrestore(¤t->sighand->siglock, flags); 488 } 489 490 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) 491 { 492 struct sigqueue *q, *first = NULL; 493 494 /* 495 * Collect the siginfo appropriate to this signal. Check if 496 * there is another siginfo for the same signal. 497 */ 498 list_for_each_entry(q, &list->list, list) { 499 if (q->info.si_signo == sig) { 500 if (first) 501 goto still_pending; 502 first = q; 503 } 504 } 505 506 sigdelset(&list->signal, sig); 507 508 if (first) { 509 still_pending: 510 list_del_init(&first->list); 511 copy_siginfo(info, &first->info); 512 __sigqueue_free(first); 513 } else { 514 /* 515 * Ok, it wasn't in the queue. This must be 516 * a fast-pathed signal or we must have been 517 * out of queue space. So zero out the info. 518 */ 519 info->si_signo = sig; 520 info->si_errno = 0; 521 info->si_code = SI_USER; 522 info->si_pid = 0; 523 info->si_uid = 0; 524 } 525 } 526 527 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 528 siginfo_t *info) 529 { 530 int sig = next_signal(pending, mask); 531 532 if (sig) { 533 if (current->notifier) { 534 if (sigismember(current->notifier_mask, sig)) { 535 if (!(current->notifier)(current->notifier_data)) { 536 clear_thread_flag(TIF_SIGPENDING); 537 return 0; 538 } 539 } 540 } 541 542 collect_signal(sig, pending, info); 543 } 544 545 return sig; 546 } 547 548 /* 549 * Dequeue a signal and return the element to the caller, which is 550 * expected to free it. 551 * 552 * All callers have to hold the siglock. 553 */ 554 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 555 { 556 int signr; 557 558 /* We only dequeue private signals from ourselves, we don't let 559 * signalfd steal them 560 */ 561 signr = __dequeue_signal(&tsk->pending, mask, info); 562 if (!signr) { 563 signr = __dequeue_signal(&tsk->signal->shared_pending, 564 mask, info); 565 /* 566 * itimer signal ? 567 * 568 * itimers are process shared and we restart periodic 569 * itimers in the signal delivery path to prevent DoS 570 * attacks in the high resolution timer case. This is 571 * compliant with the old way of self-restarting 572 * itimers, as the SIGALRM is a legacy signal and only 573 * queued once. Changing the restart behaviour to 574 * restart the timer in the signal dequeue path is 575 * reducing the timer noise on heavy loaded !highres 576 * systems too. 577 */ 578 if (unlikely(signr == SIGALRM)) { 579 struct hrtimer *tmr = &tsk->signal->real_timer; 580 581 if (!hrtimer_is_queued(tmr) && 582 tsk->signal->it_real_incr.tv64 != 0) { 583 hrtimer_forward(tmr, tmr->base->get_time(), 584 tsk->signal->it_real_incr); 585 hrtimer_restart(tmr); 586 } 587 } 588 } 589 590 recalc_sigpending(); 591 if (!signr) 592 return 0; 593 594 if (unlikely(sig_kernel_stop(signr))) { 595 /* 596 * Set a marker that we have dequeued a stop signal. Our 597 * caller might release the siglock and then the pending 598 * stop signal it is about to process is no longer in the 599 * pending bitmasks, but must still be cleared by a SIGCONT 600 * (and overruled by a SIGKILL). So those cases clear this 601 * shared flag after we've set it. Note that this flag may 602 * remain set after the signal we return is ignored or 603 * handled. That doesn't matter because its only purpose 604 * is to alert stop-signal processing code when another 605 * processor has come along and cleared the flag. 606 */ 607 current->group_stop |= GROUP_STOP_DEQUEUED; 608 } 609 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 610 /* 611 * Release the siglock to ensure proper locking order 612 * of timer locks outside of siglocks. Note, we leave 613 * irqs disabled here, since the posix-timers code is 614 * about to disable them again anyway. 615 */ 616 spin_unlock(&tsk->sighand->siglock); 617 do_schedule_next_timer(info); 618 spin_lock(&tsk->sighand->siglock); 619 } 620 return signr; 621 } 622 623 /* 624 * Tell a process that it has a new active signal.. 625 * 626 * NOTE! we rely on the previous spin_lock to 627 * lock interrupts for us! We can only be called with 628 * "siglock" held, and the local interrupt must 629 * have been disabled when that got acquired! 630 * 631 * No need to set need_resched since signal event passing 632 * goes through ->blocked 633 */ 634 void signal_wake_up(struct task_struct *t, int resume) 635 { 636 unsigned int mask; 637 638 set_tsk_thread_flag(t, TIF_SIGPENDING); 639 640 /* 641 * For SIGKILL, we want to wake it up in the stopped/traced/killable 642 * case. We don't check t->state here because there is a race with it 643 * executing another processor and just now entering stopped state. 644 * By using wake_up_state, we ensure the process will wake up and 645 * handle its death signal. 646 */ 647 mask = TASK_INTERRUPTIBLE; 648 if (resume) 649 mask |= TASK_WAKEKILL; 650 if (!wake_up_state(t, mask)) 651 kick_process(t); 652 } 653 654 /* 655 * Remove signals in mask from the pending set and queue. 656 * Returns 1 if any signals were found. 657 * 658 * All callers must be holding the siglock. 659 * 660 * This version takes a sigset mask and looks at all signals, 661 * not just those in the first mask word. 662 */ 663 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) 664 { 665 struct sigqueue *q, *n; 666 sigset_t m; 667 668 sigandsets(&m, mask, &s->signal); 669 if (sigisemptyset(&m)) 670 return 0; 671 672 sigandnsets(&s->signal, &s->signal, mask); 673 list_for_each_entry_safe(q, n, &s->list, list) { 674 if (sigismember(mask, q->info.si_signo)) { 675 list_del_init(&q->list); 676 __sigqueue_free(q); 677 } 678 } 679 return 1; 680 } 681 /* 682 * Remove signals in mask from the pending set and queue. 683 * Returns 1 if any signals were found. 684 * 685 * All callers must be holding the siglock. 686 */ 687 static int rm_from_queue(unsigned long mask, struct sigpending *s) 688 { 689 struct sigqueue *q, *n; 690 691 if (!sigtestsetmask(&s->signal, mask)) 692 return 0; 693 694 sigdelsetmask(&s->signal, mask); 695 list_for_each_entry_safe(q, n, &s->list, list) { 696 if (q->info.si_signo < SIGRTMIN && 697 (mask & sigmask(q->info.si_signo))) { 698 list_del_init(&q->list); 699 __sigqueue_free(q); 700 } 701 } 702 return 1; 703 } 704 705 static inline int is_si_special(const struct siginfo *info) 706 { 707 return info <= SEND_SIG_FORCED; 708 } 709 710 static inline bool si_fromuser(const struct siginfo *info) 711 { 712 return info == SEND_SIG_NOINFO || 713 (!is_si_special(info) && SI_FROMUSER(info)); 714 } 715 716 /* 717 * called with RCU read lock from check_kill_permission() 718 */ 719 static int kill_ok_by_cred(struct task_struct *t) 720 { 721 const struct cred *cred = current_cred(); 722 const struct cred *tcred = __task_cred(t); 723 724 if (cred->user->user_ns == tcred->user->user_ns && 725 (cred->euid == tcred->suid || 726 cred->euid == tcred->uid || 727 cred->uid == tcred->suid || 728 cred->uid == tcred->uid)) 729 return 1; 730 731 if (ns_capable(tcred->user->user_ns, CAP_KILL)) 732 return 1; 733 734 return 0; 735 } 736 737 /* 738 * Bad permissions for sending the signal 739 * - the caller must hold the RCU read lock 740 */ 741 static int check_kill_permission(int sig, struct siginfo *info, 742 struct task_struct *t) 743 { 744 struct pid *sid; 745 int error; 746 747 if (!valid_signal(sig)) 748 return -EINVAL; 749 750 if (!si_fromuser(info)) 751 return 0; 752 753 error = audit_signal_info(sig, t); /* Let audit system see the signal */ 754 if (error) 755 return error; 756 757 if (!same_thread_group(current, t) && 758 !kill_ok_by_cred(t)) { 759 switch (sig) { 760 case SIGCONT: 761 sid = task_session(t); 762 /* 763 * We don't return the error if sid == NULL. The 764 * task was unhashed, the caller must notice this. 765 */ 766 if (!sid || sid == task_session(current)) 767 break; 768 default: 769 return -EPERM; 770 } 771 } 772 773 return security_task_kill(t, info, sig, 0); 774 } 775 776 /* 777 * Handle magic process-wide effects of stop/continue signals. Unlike 778 * the signal actions, these happen immediately at signal-generation 779 * time regardless of blocking, ignoring, or handling. This does the 780 * actual continuing for SIGCONT, but not the actual stopping for stop 781 * signals. The process stop is done as a signal action for SIG_DFL. 782 * 783 * Returns true if the signal should be actually delivered, otherwise 784 * it should be dropped. 785 */ 786 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) 787 { 788 struct signal_struct *signal = p->signal; 789 struct task_struct *t; 790 791 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { 792 /* 793 * The process is in the middle of dying, nothing to do. 794 */ 795 } else if (sig_kernel_stop(sig)) { 796 /* 797 * This is a stop signal. Remove SIGCONT from all queues. 798 */ 799 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); 800 t = p; 801 do { 802 rm_from_queue(sigmask(SIGCONT), &t->pending); 803 } while_each_thread(p, t); 804 } else if (sig == SIGCONT) { 805 unsigned int why; 806 /* 807 * Remove all stop signals from all queues, wake all threads. 808 */ 809 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); 810 t = p; 811 do { 812 task_clear_group_stop_pending(t); 813 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); 814 wake_up_state(t, __TASK_STOPPED); 815 } while_each_thread(p, t); 816 817 /* 818 * Notify the parent with CLD_CONTINUED if we were stopped. 819 * 820 * If we were in the middle of a group stop, we pretend it 821 * was already finished, and then continued. Since SIGCHLD 822 * doesn't queue we report only CLD_STOPPED, as if the next 823 * CLD_CONTINUED was dropped. 824 */ 825 why = 0; 826 if (signal->flags & SIGNAL_STOP_STOPPED) 827 why |= SIGNAL_CLD_CONTINUED; 828 else if (signal->group_stop_count) 829 why |= SIGNAL_CLD_STOPPED; 830 831 if (why) { 832 /* 833 * The first thread which returns from do_signal_stop() 834 * will take ->siglock, notice SIGNAL_CLD_MASK, and 835 * notify its parent. See get_signal_to_deliver(). 836 */ 837 signal->flags = why | SIGNAL_STOP_CONTINUED; 838 signal->group_stop_count = 0; 839 signal->group_exit_code = 0; 840 } 841 } 842 843 return !sig_ignored(p, sig, from_ancestor_ns); 844 } 845 846 /* 847 * Test if P wants to take SIG. After we've checked all threads with this, 848 * it's equivalent to finding no threads not blocking SIG. Any threads not 849 * blocking SIG were ruled out because they are not running and already 850 * have pending signals. Such threads will dequeue from the shared queue 851 * as soon as they're available, so putting the signal on the shared queue 852 * will be equivalent to sending it to one such thread. 853 */ 854 static inline int wants_signal(int sig, struct task_struct *p) 855 { 856 if (sigismember(&p->blocked, sig)) 857 return 0; 858 if (p->flags & PF_EXITING) 859 return 0; 860 if (sig == SIGKILL) 861 return 1; 862 if (task_is_stopped_or_traced(p)) 863 return 0; 864 return task_curr(p) || !signal_pending(p); 865 } 866 867 static void complete_signal(int sig, struct task_struct *p, int group) 868 { 869 struct signal_struct *signal = p->signal; 870 struct task_struct *t; 871 872 /* 873 * Now find a thread we can wake up to take the signal off the queue. 874 * 875 * If the main thread wants the signal, it gets first crack. 876 * Probably the least surprising to the average bear. 877 */ 878 if (wants_signal(sig, p)) 879 t = p; 880 else if (!group || thread_group_empty(p)) 881 /* 882 * There is just one thread and it does not need to be woken. 883 * It will dequeue unblocked signals before it runs again. 884 */ 885 return; 886 else { 887 /* 888 * Otherwise try to find a suitable thread. 889 */ 890 t = signal->curr_target; 891 while (!wants_signal(sig, t)) { 892 t = next_thread(t); 893 if (t == signal->curr_target) 894 /* 895 * No thread needs to be woken. 896 * Any eligible threads will see 897 * the signal in the queue soon. 898 */ 899 return; 900 } 901 signal->curr_target = t; 902 } 903 904 /* 905 * Found a killable thread. If the signal will be fatal, 906 * then start taking the whole group down immediately. 907 */ 908 if (sig_fatal(p, sig) && 909 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && 910 !sigismember(&t->real_blocked, sig) && 911 (sig == SIGKILL || 912 !tracehook_consider_fatal_signal(t, sig))) { 913 /* 914 * This signal will be fatal to the whole group. 915 */ 916 if (!sig_kernel_coredump(sig)) { 917 /* 918 * Start a group exit and wake everybody up. 919 * This way we don't have other threads 920 * running and doing things after a slower 921 * thread has the fatal signal pending. 922 */ 923 signal->flags = SIGNAL_GROUP_EXIT; 924 signal->group_exit_code = sig; 925 signal->group_stop_count = 0; 926 t = p; 927 do { 928 task_clear_group_stop_pending(t); 929 sigaddset(&t->pending.signal, SIGKILL); 930 signal_wake_up(t, 1); 931 } while_each_thread(p, t); 932 return; 933 } 934 } 935 936 /* 937 * The signal is already in the shared-pending queue. 938 * Tell the chosen thread to wake up and dequeue it. 939 */ 940 signal_wake_up(t, sig == SIGKILL); 941 return; 942 } 943 944 static inline int legacy_queue(struct sigpending *signals, int sig) 945 { 946 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); 947 } 948 949 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, 950 int group, int from_ancestor_ns) 951 { 952 struct sigpending *pending; 953 struct sigqueue *q; 954 int override_rlimit; 955 956 trace_signal_generate(sig, info, t); 957 958 assert_spin_locked(&t->sighand->siglock); 959 960 if (!prepare_signal(sig, t, from_ancestor_ns)) 961 return 0; 962 963 pending = group ? &t->signal->shared_pending : &t->pending; 964 /* 965 * Short-circuit ignored signals and support queuing 966 * exactly one non-rt signal, so that we can get more 967 * detailed information about the cause of the signal. 968 */ 969 if (legacy_queue(pending, sig)) 970 return 0; 971 /* 972 * fast-pathed signals for kernel-internal things like SIGSTOP 973 * or SIGKILL. 974 */ 975 if (info == SEND_SIG_FORCED) 976 goto out_set; 977 978 /* 979 * Real-time signals must be queued if sent by sigqueue, or 980 * some other real-time mechanism. It is implementation 981 * defined whether kill() does so. We attempt to do so, on 982 * the principle of least surprise, but since kill is not 983 * allowed to fail with EAGAIN when low on memory we just 984 * make sure at least one signal gets delivered and don't 985 * pass on the info struct. 986 */ 987 if (sig < SIGRTMIN) 988 override_rlimit = (is_si_special(info) || info->si_code >= 0); 989 else 990 override_rlimit = 0; 991 992 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, 993 override_rlimit); 994 if (q) { 995 list_add_tail(&q->list, &pending->list); 996 switch ((unsigned long) info) { 997 case (unsigned long) SEND_SIG_NOINFO: 998 q->info.si_signo = sig; 999 q->info.si_errno = 0; 1000 q->info.si_code = SI_USER; 1001 q->info.si_pid = task_tgid_nr_ns(current, 1002 task_active_pid_ns(t)); 1003 q->info.si_uid = current_uid(); 1004 break; 1005 case (unsigned long) SEND_SIG_PRIV: 1006 q->info.si_signo = sig; 1007 q->info.si_errno = 0; 1008 q->info.si_code = SI_KERNEL; 1009 q->info.si_pid = 0; 1010 q->info.si_uid = 0; 1011 break; 1012 default: 1013 copy_siginfo(&q->info, info); 1014 if (from_ancestor_ns) 1015 q->info.si_pid = 0; 1016 break; 1017 } 1018 } else if (!is_si_special(info)) { 1019 if (sig >= SIGRTMIN && info->si_code != SI_USER) { 1020 /* 1021 * Queue overflow, abort. We may abort if the 1022 * signal was rt and sent by user using something 1023 * other than kill(). 1024 */ 1025 trace_signal_overflow_fail(sig, group, info); 1026 return -EAGAIN; 1027 } else { 1028 /* 1029 * This is a silent loss of information. We still 1030 * send the signal, but the *info bits are lost. 1031 */ 1032 trace_signal_lose_info(sig, group, info); 1033 } 1034 } 1035 1036 out_set: 1037 signalfd_notify(t, sig); 1038 sigaddset(&pending->signal, sig); 1039 complete_signal(sig, t, group); 1040 return 0; 1041 } 1042 1043 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, 1044 int group) 1045 { 1046 int from_ancestor_ns = 0; 1047 1048 #ifdef CONFIG_PID_NS 1049 from_ancestor_ns = si_fromuser(info) && 1050 !task_pid_nr_ns(current, task_active_pid_ns(t)); 1051 #endif 1052 1053 return __send_signal(sig, info, t, group, from_ancestor_ns); 1054 } 1055 1056 static void print_fatal_signal(struct pt_regs *regs, int signr) 1057 { 1058 printk("%s/%d: potentially unexpected fatal signal %d.\n", 1059 current->comm, task_pid_nr(current), signr); 1060 1061 #if defined(__i386__) && !defined(__arch_um__) 1062 printk("code at %08lx: ", regs->ip); 1063 { 1064 int i; 1065 for (i = 0; i < 16; i++) { 1066 unsigned char insn; 1067 1068 if (get_user(insn, (unsigned char *)(regs->ip + i))) 1069 break; 1070 printk("%02x ", insn); 1071 } 1072 } 1073 #endif 1074 printk("\n"); 1075 preempt_disable(); 1076 show_regs(regs); 1077 preempt_enable(); 1078 } 1079 1080 static int __init setup_print_fatal_signals(char *str) 1081 { 1082 get_option (&str, &print_fatal_signals); 1083 1084 return 1; 1085 } 1086 1087 __setup("print-fatal-signals=", setup_print_fatal_signals); 1088 1089 int 1090 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1091 { 1092 return send_signal(sig, info, p, 1); 1093 } 1094 1095 static int 1096 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) 1097 { 1098 return send_signal(sig, info, t, 0); 1099 } 1100 1101 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, 1102 bool group) 1103 { 1104 unsigned long flags; 1105 int ret = -ESRCH; 1106 1107 if (lock_task_sighand(p, &flags)) { 1108 ret = send_signal(sig, info, p, group); 1109 unlock_task_sighand(p, &flags); 1110 } 1111 1112 return ret; 1113 } 1114 1115 /* 1116 * Force a signal that the process can't ignore: if necessary 1117 * we unblock the signal and change any SIG_IGN to SIG_DFL. 1118 * 1119 * Note: If we unblock the signal, we always reset it to SIG_DFL, 1120 * since we do not want to have a signal handler that was blocked 1121 * be invoked when user space had explicitly blocked it. 1122 * 1123 * We don't want to have recursive SIGSEGV's etc, for example, 1124 * that is why we also clear SIGNAL_UNKILLABLE. 1125 */ 1126 int 1127 force_sig_info(int sig, struct siginfo *info, struct task_struct *t) 1128 { 1129 unsigned long int flags; 1130 int ret, blocked, ignored; 1131 struct k_sigaction *action; 1132 1133 spin_lock_irqsave(&t->sighand->siglock, flags); 1134 action = &t->sighand->action[sig-1]; 1135 ignored = action->sa.sa_handler == SIG_IGN; 1136 blocked = sigismember(&t->blocked, sig); 1137 if (blocked || ignored) { 1138 action->sa.sa_handler = SIG_DFL; 1139 if (blocked) { 1140 sigdelset(&t->blocked, sig); 1141 recalc_sigpending_and_wake(t); 1142 } 1143 } 1144 if (action->sa.sa_handler == SIG_DFL) 1145 t->signal->flags &= ~SIGNAL_UNKILLABLE; 1146 ret = specific_send_sig_info(sig, info, t); 1147 spin_unlock_irqrestore(&t->sighand->siglock, flags); 1148 1149 return ret; 1150 } 1151 1152 /* 1153 * Nuke all other threads in the group. 1154 */ 1155 int zap_other_threads(struct task_struct *p) 1156 { 1157 struct task_struct *t = p; 1158 int count = 0; 1159 1160 p->signal->group_stop_count = 0; 1161 1162 while_each_thread(p, t) { 1163 task_clear_group_stop_pending(t); 1164 count++; 1165 1166 /* Don't bother with already dead threads */ 1167 if (t->exit_state) 1168 continue; 1169 sigaddset(&t->pending.signal, SIGKILL); 1170 signal_wake_up(t, 1); 1171 } 1172 1173 return count; 1174 } 1175 1176 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 1177 unsigned long *flags) 1178 { 1179 struct sighand_struct *sighand; 1180 1181 rcu_read_lock(); 1182 for (;;) { 1183 sighand = rcu_dereference(tsk->sighand); 1184 if (unlikely(sighand == NULL)) 1185 break; 1186 1187 spin_lock_irqsave(&sighand->siglock, *flags); 1188 if (likely(sighand == tsk->sighand)) 1189 break; 1190 spin_unlock_irqrestore(&sighand->siglock, *flags); 1191 } 1192 rcu_read_unlock(); 1193 1194 return sighand; 1195 } 1196 1197 /* 1198 * send signal info to all the members of a group 1199 */ 1200 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1201 { 1202 int ret; 1203 1204 rcu_read_lock(); 1205 ret = check_kill_permission(sig, info, p); 1206 rcu_read_unlock(); 1207 1208 if (!ret && sig) 1209 ret = do_send_sig_info(sig, info, p, true); 1210 1211 return ret; 1212 } 1213 1214 /* 1215 * __kill_pgrp_info() sends a signal to a process group: this is what the tty 1216 * control characters do (^C, ^Z etc) 1217 * - the caller must hold at least a readlock on tasklist_lock 1218 */ 1219 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) 1220 { 1221 struct task_struct *p = NULL; 1222 int retval, success; 1223 1224 success = 0; 1225 retval = -ESRCH; 1226 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 1227 int err = group_send_sig_info(sig, info, p); 1228 success |= !err; 1229 retval = err; 1230 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 1231 return success ? 0 : retval; 1232 } 1233 1234 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) 1235 { 1236 int error = -ESRCH; 1237 struct task_struct *p; 1238 1239 rcu_read_lock(); 1240 retry: 1241 p = pid_task(pid, PIDTYPE_PID); 1242 if (p) { 1243 error = group_send_sig_info(sig, info, p); 1244 if (unlikely(error == -ESRCH)) 1245 /* 1246 * The task was unhashed in between, try again. 1247 * If it is dead, pid_task() will return NULL, 1248 * if we race with de_thread() it will find the 1249 * new leader. 1250 */ 1251 goto retry; 1252 } 1253 rcu_read_unlock(); 1254 1255 return error; 1256 } 1257 1258 int kill_proc_info(int sig, struct siginfo *info, pid_t pid) 1259 { 1260 int error; 1261 rcu_read_lock(); 1262 error = kill_pid_info(sig, info, find_vpid(pid)); 1263 rcu_read_unlock(); 1264 return error; 1265 } 1266 1267 /* like kill_pid_info(), but doesn't use uid/euid of "current" */ 1268 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, 1269 uid_t uid, uid_t euid, u32 secid) 1270 { 1271 int ret = -EINVAL; 1272 struct task_struct *p; 1273 const struct cred *pcred; 1274 unsigned long flags; 1275 1276 if (!valid_signal(sig)) 1277 return ret; 1278 1279 rcu_read_lock(); 1280 p = pid_task(pid, PIDTYPE_PID); 1281 if (!p) { 1282 ret = -ESRCH; 1283 goto out_unlock; 1284 } 1285 pcred = __task_cred(p); 1286 if (si_fromuser(info) && 1287 euid != pcred->suid && euid != pcred->uid && 1288 uid != pcred->suid && uid != pcred->uid) { 1289 ret = -EPERM; 1290 goto out_unlock; 1291 } 1292 ret = security_task_kill(p, info, sig, secid); 1293 if (ret) 1294 goto out_unlock; 1295 1296 if (sig) { 1297 if (lock_task_sighand(p, &flags)) { 1298 ret = __send_signal(sig, info, p, 1, 0); 1299 unlock_task_sighand(p, &flags); 1300 } else 1301 ret = -ESRCH; 1302 } 1303 out_unlock: 1304 rcu_read_unlock(); 1305 return ret; 1306 } 1307 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); 1308 1309 /* 1310 * kill_something_info() interprets pid in interesting ways just like kill(2). 1311 * 1312 * POSIX specifies that kill(-1,sig) is unspecified, but what we have 1313 * is probably wrong. Should make it like BSD or SYSV. 1314 */ 1315 1316 static int kill_something_info(int sig, struct siginfo *info, pid_t pid) 1317 { 1318 int ret; 1319 1320 if (pid > 0) { 1321 rcu_read_lock(); 1322 ret = kill_pid_info(sig, info, find_vpid(pid)); 1323 rcu_read_unlock(); 1324 return ret; 1325 } 1326 1327 read_lock(&tasklist_lock); 1328 if (pid != -1) { 1329 ret = __kill_pgrp_info(sig, info, 1330 pid ? find_vpid(-pid) : task_pgrp(current)); 1331 } else { 1332 int retval = 0, count = 0; 1333 struct task_struct * p; 1334 1335 for_each_process(p) { 1336 if (task_pid_vnr(p) > 1 && 1337 !same_thread_group(p, current)) { 1338 int err = group_send_sig_info(sig, info, p); 1339 ++count; 1340 if (err != -EPERM) 1341 retval = err; 1342 } 1343 } 1344 ret = count ? retval : -ESRCH; 1345 } 1346 read_unlock(&tasklist_lock); 1347 1348 return ret; 1349 } 1350 1351 /* 1352 * These are for backward compatibility with the rest of the kernel source. 1353 */ 1354 1355 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1356 { 1357 /* 1358 * Make sure legacy kernel users don't send in bad values 1359 * (normal paths check this in check_kill_permission). 1360 */ 1361 if (!valid_signal(sig)) 1362 return -EINVAL; 1363 1364 return do_send_sig_info(sig, info, p, false); 1365 } 1366 1367 #define __si_special(priv) \ 1368 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) 1369 1370 int 1371 send_sig(int sig, struct task_struct *p, int priv) 1372 { 1373 return send_sig_info(sig, __si_special(priv), p); 1374 } 1375 1376 void 1377 force_sig(int sig, struct task_struct *p) 1378 { 1379 force_sig_info(sig, SEND_SIG_PRIV, p); 1380 } 1381 1382 /* 1383 * When things go south during signal handling, we 1384 * will force a SIGSEGV. And if the signal that caused 1385 * the problem was already a SIGSEGV, we'll want to 1386 * make sure we don't even try to deliver the signal.. 1387 */ 1388 int 1389 force_sigsegv(int sig, struct task_struct *p) 1390 { 1391 if (sig == SIGSEGV) { 1392 unsigned long flags; 1393 spin_lock_irqsave(&p->sighand->siglock, flags); 1394 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; 1395 spin_unlock_irqrestore(&p->sighand->siglock, flags); 1396 } 1397 force_sig(SIGSEGV, p); 1398 return 0; 1399 } 1400 1401 int kill_pgrp(struct pid *pid, int sig, int priv) 1402 { 1403 int ret; 1404 1405 read_lock(&tasklist_lock); 1406 ret = __kill_pgrp_info(sig, __si_special(priv), pid); 1407 read_unlock(&tasklist_lock); 1408 1409 return ret; 1410 } 1411 EXPORT_SYMBOL(kill_pgrp); 1412 1413 int kill_pid(struct pid *pid, int sig, int priv) 1414 { 1415 return kill_pid_info(sig, __si_special(priv), pid); 1416 } 1417 EXPORT_SYMBOL(kill_pid); 1418 1419 /* 1420 * These functions support sending signals using preallocated sigqueue 1421 * structures. This is needed "because realtime applications cannot 1422 * afford to lose notifications of asynchronous events, like timer 1423 * expirations or I/O completions". In the case of POSIX Timers 1424 * we allocate the sigqueue structure from the timer_create. If this 1425 * allocation fails we are able to report the failure to the application 1426 * with an EAGAIN error. 1427 */ 1428 struct sigqueue *sigqueue_alloc(void) 1429 { 1430 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); 1431 1432 if (q) 1433 q->flags |= SIGQUEUE_PREALLOC; 1434 1435 return q; 1436 } 1437 1438 void sigqueue_free(struct sigqueue *q) 1439 { 1440 unsigned long flags; 1441 spinlock_t *lock = ¤t->sighand->siglock; 1442 1443 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); 1444 /* 1445 * We must hold ->siglock while testing q->list 1446 * to serialize with collect_signal() or with 1447 * __exit_signal()->flush_sigqueue(). 1448 */ 1449 spin_lock_irqsave(lock, flags); 1450 q->flags &= ~SIGQUEUE_PREALLOC; 1451 /* 1452 * If it is queued it will be freed when dequeued, 1453 * like the "regular" sigqueue. 1454 */ 1455 if (!list_empty(&q->list)) 1456 q = NULL; 1457 spin_unlock_irqrestore(lock, flags); 1458 1459 if (q) 1460 __sigqueue_free(q); 1461 } 1462 1463 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) 1464 { 1465 int sig = q->info.si_signo; 1466 struct sigpending *pending; 1467 unsigned long flags; 1468 int ret; 1469 1470 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); 1471 1472 ret = -1; 1473 if (!likely(lock_task_sighand(t, &flags))) 1474 goto ret; 1475 1476 ret = 1; /* the signal is ignored */ 1477 if (!prepare_signal(sig, t, 0)) 1478 goto out; 1479 1480 ret = 0; 1481 if (unlikely(!list_empty(&q->list))) { 1482 /* 1483 * If an SI_TIMER entry is already queue just increment 1484 * the overrun count. 1485 */ 1486 BUG_ON(q->info.si_code != SI_TIMER); 1487 q->info.si_overrun++; 1488 goto out; 1489 } 1490 q->info.si_overrun = 0; 1491 1492 signalfd_notify(t, sig); 1493 pending = group ? &t->signal->shared_pending : &t->pending; 1494 list_add_tail(&q->list, &pending->list); 1495 sigaddset(&pending->signal, sig); 1496 complete_signal(sig, t, group); 1497 out: 1498 unlock_task_sighand(t, &flags); 1499 ret: 1500 return ret; 1501 } 1502 1503 /* 1504 * Let a parent know about the death of a child. 1505 * For a stopped/continued status change, use do_notify_parent_cldstop instead. 1506 * 1507 * Returns -1 if our parent ignored us and so we've switched to 1508 * self-reaping, or else @sig. 1509 */ 1510 int do_notify_parent(struct task_struct *tsk, int sig) 1511 { 1512 struct siginfo info; 1513 unsigned long flags; 1514 struct sighand_struct *psig; 1515 int ret = sig; 1516 1517 BUG_ON(sig == -1); 1518 1519 /* do_notify_parent_cldstop should have been called instead. */ 1520 BUG_ON(task_is_stopped_or_traced(tsk)); 1521 1522 BUG_ON(!task_ptrace(tsk) && 1523 (tsk->group_leader != tsk || !thread_group_empty(tsk))); 1524 1525 info.si_signo = sig; 1526 info.si_errno = 0; 1527 /* 1528 * we are under tasklist_lock here so our parent is tied to 1529 * us and cannot exit and release its namespace. 1530 * 1531 * the only it can is to switch its nsproxy with sys_unshare, 1532 * bu uncharing pid namespaces is not allowed, so we'll always 1533 * see relevant namespace 1534 * 1535 * write_lock() currently calls preempt_disable() which is the 1536 * same as rcu_read_lock(), but according to Oleg, this is not 1537 * correct to rely on this 1538 */ 1539 rcu_read_lock(); 1540 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); 1541 info.si_uid = __task_cred(tsk)->uid; 1542 rcu_read_unlock(); 1543 1544 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, 1545 tsk->signal->utime)); 1546 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, 1547 tsk->signal->stime)); 1548 1549 info.si_status = tsk->exit_code & 0x7f; 1550 if (tsk->exit_code & 0x80) 1551 info.si_code = CLD_DUMPED; 1552 else if (tsk->exit_code & 0x7f) 1553 info.si_code = CLD_KILLED; 1554 else { 1555 info.si_code = CLD_EXITED; 1556 info.si_status = tsk->exit_code >> 8; 1557 } 1558 1559 psig = tsk->parent->sighand; 1560 spin_lock_irqsave(&psig->siglock, flags); 1561 if (!task_ptrace(tsk) && sig == SIGCHLD && 1562 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || 1563 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { 1564 /* 1565 * We are exiting and our parent doesn't care. POSIX.1 1566 * defines special semantics for setting SIGCHLD to SIG_IGN 1567 * or setting the SA_NOCLDWAIT flag: we should be reaped 1568 * automatically and not left for our parent's wait4 call. 1569 * Rather than having the parent do it as a magic kind of 1570 * signal handler, we just set this to tell do_exit that we 1571 * can be cleaned up without becoming a zombie. Note that 1572 * we still call __wake_up_parent in this case, because a 1573 * blocked sys_wait4 might now return -ECHILD. 1574 * 1575 * Whether we send SIGCHLD or not for SA_NOCLDWAIT 1576 * is implementation-defined: we do (if you don't want 1577 * it, just use SIG_IGN instead). 1578 */ 1579 ret = tsk->exit_signal = -1; 1580 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1581 sig = -1; 1582 } 1583 if (valid_signal(sig) && sig > 0) 1584 __group_send_sig_info(sig, &info, tsk->parent); 1585 __wake_up_parent(tsk, tsk->parent); 1586 spin_unlock_irqrestore(&psig->siglock, flags); 1587 1588 return ret; 1589 } 1590 1591 /** 1592 * do_notify_parent_cldstop - notify parent of stopped/continued state change 1593 * @tsk: task reporting the state change 1594 * @for_ptracer: the notification is for ptracer 1595 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report 1596 * 1597 * Notify @tsk's parent that the stopped/continued state has changed. If 1598 * @for_ptracer is %false, @tsk's group leader notifies to its real parent. 1599 * If %true, @tsk reports to @tsk->parent which should be the ptracer. 1600 * 1601 * CONTEXT: 1602 * Must be called with tasklist_lock at least read locked. 1603 */ 1604 static void do_notify_parent_cldstop(struct task_struct *tsk, 1605 bool for_ptracer, int why) 1606 { 1607 struct siginfo info; 1608 unsigned long flags; 1609 struct task_struct *parent; 1610 struct sighand_struct *sighand; 1611 1612 if (for_ptracer) { 1613 parent = tsk->parent; 1614 } else { 1615 tsk = tsk->group_leader; 1616 parent = tsk->real_parent; 1617 } 1618 1619 info.si_signo = SIGCHLD; 1620 info.si_errno = 0; 1621 /* 1622 * see comment in do_notify_parent() about the following 4 lines 1623 */ 1624 rcu_read_lock(); 1625 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); 1626 info.si_uid = __task_cred(tsk)->uid; 1627 rcu_read_unlock(); 1628 1629 info.si_utime = cputime_to_clock_t(tsk->utime); 1630 info.si_stime = cputime_to_clock_t(tsk->stime); 1631 1632 info.si_code = why; 1633 switch (why) { 1634 case CLD_CONTINUED: 1635 info.si_status = SIGCONT; 1636 break; 1637 case CLD_STOPPED: 1638 info.si_status = tsk->signal->group_exit_code & 0x7f; 1639 break; 1640 case CLD_TRAPPED: 1641 info.si_status = tsk->exit_code & 0x7f; 1642 break; 1643 default: 1644 BUG(); 1645 } 1646 1647 sighand = parent->sighand; 1648 spin_lock_irqsave(&sighand->siglock, flags); 1649 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && 1650 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) 1651 __group_send_sig_info(SIGCHLD, &info, parent); 1652 /* 1653 * Even if SIGCHLD is not generated, we must wake up wait4 calls. 1654 */ 1655 __wake_up_parent(tsk, parent); 1656 spin_unlock_irqrestore(&sighand->siglock, flags); 1657 } 1658 1659 static inline int may_ptrace_stop(void) 1660 { 1661 if (!likely(task_ptrace(current))) 1662 return 0; 1663 /* 1664 * Are we in the middle of do_coredump? 1665 * If so and our tracer is also part of the coredump stopping 1666 * is a deadlock situation, and pointless because our tracer 1667 * is dead so don't allow us to stop. 1668 * If SIGKILL was already sent before the caller unlocked 1669 * ->siglock we must see ->core_state != NULL. Otherwise it 1670 * is safe to enter schedule(). 1671 */ 1672 if (unlikely(current->mm->core_state) && 1673 unlikely(current->mm == current->parent->mm)) 1674 return 0; 1675 1676 return 1; 1677 } 1678 1679 /* 1680 * Return non-zero if there is a SIGKILL that should be waking us up. 1681 * Called with the siglock held. 1682 */ 1683 static int sigkill_pending(struct task_struct *tsk) 1684 { 1685 return sigismember(&tsk->pending.signal, SIGKILL) || 1686 sigismember(&tsk->signal->shared_pending.signal, SIGKILL); 1687 } 1688 1689 /* 1690 * Test whether the target task of the usual cldstop notification - the 1691 * real_parent of @child - is in the same group as the ptracer. 1692 */ 1693 static bool real_parent_is_ptracer(struct task_struct *child) 1694 { 1695 return same_thread_group(child->parent, child->real_parent); 1696 } 1697 1698 /* 1699 * This must be called with current->sighand->siglock held. 1700 * 1701 * This should be the path for all ptrace stops. 1702 * We always set current->last_siginfo while stopped here. 1703 * That makes it a way to test a stopped process for 1704 * being ptrace-stopped vs being job-control-stopped. 1705 * 1706 * If we actually decide not to stop at all because the tracer 1707 * is gone, we keep current->exit_code unless clear_code. 1708 */ 1709 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) 1710 __releases(¤t->sighand->siglock) 1711 __acquires(¤t->sighand->siglock) 1712 { 1713 bool gstop_done = false; 1714 1715 if (arch_ptrace_stop_needed(exit_code, info)) { 1716 /* 1717 * The arch code has something special to do before a 1718 * ptrace stop. This is allowed to block, e.g. for faults 1719 * on user stack pages. We can't keep the siglock while 1720 * calling arch_ptrace_stop, so we must release it now. 1721 * To preserve proper semantics, we must do this before 1722 * any signal bookkeeping like checking group_stop_count. 1723 * Meanwhile, a SIGKILL could come in before we retake the 1724 * siglock. That must prevent us from sleeping in TASK_TRACED. 1725 * So after regaining the lock, we must check for SIGKILL. 1726 */ 1727 spin_unlock_irq(¤t->sighand->siglock); 1728 arch_ptrace_stop(exit_code, info); 1729 spin_lock_irq(¤t->sighand->siglock); 1730 if (sigkill_pending(current)) 1731 return; 1732 } 1733 1734 /* 1735 * If @why is CLD_STOPPED, we're trapping to participate in a group 1736 * stop. Do the bookkeeping. Note that if SIGCONT was delievered 1737 * while siglock was released for the arch hook, PENDING could be 1738 * clear now. We act as if SIGCONT is received after TASK_TRACED 1739 * is entered - ignore it. 1740 */ 1741 if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING)) 1742 gstop_done = task_participate_group_stop(current); 1743 1744 current->last_siginfo = info; 1745 current->exit_code = exit_code; 1746 1747 /* 1748 * TRACED should be visible before TRAPPING is cleared; otherwise, 1749 * the tracer might fail do_wait(). 1750 */ 1751 set_current_state(TASK_TRACED); 1752 1753 /* 1754 * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and 1755 * transition to TASK_TRACED should be atomic with respect to 1756 * siglock. This hsould be done after the arch hook as siglock is 1757 * released and regrabbed across it. 1758 */ 1759 task_clear_group_stop_trapping(current); 1760 1761 spin_unlock_irq(¤t->sighand->siglock); 1762 read_lock(&tasklist_lock); 1763 if (may_ptrace_stop()) { 1764 /* 1765 * Notify parents of the stop. 1766 * 1767 * While ptraced, there are two parents - the ptracer and 1768 * the real_parent of the group_leader. The ptracer should 1769 * know about every stop while the real parent is only 1770 * interested in the completion of group stop. The states 1771 * for the two don't interact with each other. Notify 1772 * separately unless they're gonna be duplicates. 1773 */ 1774 do_notify_parent_cldstop(current, true, why); 1775 if (gstop_done && !real_parent_is_ptracer(current)) 1776 do_notify_parent_cldstop(current, false, why); 1777 1778 /* 1779 * Don't want to allow preemption here, because 1780 * sys_ptrace() needs this task to be inactive. 1781 * 1782 * XXX: implement read_unlock_no_resched(). 1783 */ 1784 preempt_disable(); 1785 read_unlock(&tasklist_lock); 1786 preempt_enable_no_resched(); 1787 schedule(); 1788 } else { 1789 /* 1790 * By the time we got the lock, our tracer went away. 1791 * Don't drop the lock yet, another tracer may come. 1792 * 1793 * If @gstop_done, the ptracer went away between group stop 1794 * completion and here. During detach, it would have set 1795 * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED 1796 * in do_signal_stop() on return, so notifying the real 1797 * parent of the group stop completion is enough. 1798 */ 1799 if (gstop_done) 1800 do_notify_parent_cldstop(current, false, why); 1801 1802 __set_current_state(TASK_RUNNING); 1803 if (clear_code) 1804 current->exit_code = 0; 1805 read_unlock(&tasklist_lock); 1806 } 1807 1808 /* 1809 * While in TASK_TRACED, we were considered "frozen enough". 1810 * Now that we woke up, it's crucial if we're supposed to be 1811 * frozen that we freeze now before running anything substantial. 1812 */ 1813 try_to_freeze(); 1814 1815 /* 1816 * We are back. Now reacquire the siglock before touching 1817 * last_siginfo, so that we are sure to have synchronized with 1818 * any signal-sending on another CPU that wants to examine it. 1819 */ 1820 spin_lock_irq(¤t->sighand->siglock); 1821 current->last_siginfo = NULL; 1822 1823 /* 1824 * Queued signals ignored us while we were stopped for tracing. 1825 * So check for any that we should take before resuming user mode. 1826 * This sets TIF_SIGPENDING, but never clears it. 1827 */ 1828 recalc_sigpending_tsk(current); 1829 } 1830 1831 void ptrace_notify(int exit_code) 1832 { 1833 siginfo_t info; 1834 1835 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); 1836 1837 memset(&info, 0, sizeof info); 1838 info.si_signo = SIGTRAP; 1839 info.si_code = exit_code; 1840 info.si_pid = task_pid_vnr(current); 1841 info.si_uid = current_uid(); 1842 1843 /* Let the debugger run. */ 1844 spin_lock_irq(¤t->sighand->siglock); 1845 ptrace_stop(exit_code, CLD_TRAPPED, 1, &info); 1846 spin_unlock_irq(¤t->sighand->siglock); 1847 } 1848 1849 /* 1850 * This performs the stopping for SIGSTOP and other stop signals. 1851 * We have to stop all threads in the thread group. 1852 * Returns non-zero if we've actually stopped and released the siglock. 1853 * Returns zero if we didn't stop and still hold the siglock. 1854 */ 1855 static int do_signal_stop(int signr) 1856 { 1857 struct signal_struct *sig = current->signal; 1858 1859 if (!(current->group_stop & GROUP_STOP_PENDING)) { 1860 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; 1861 struct task_struct *t; 1862 1863 /* signr will be recorded in task->group_stop for retries */ 1864 WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); 1865 1866 if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) || 1867 unlikely(signal_group_exit(sig))) 1868 return 0; 1869 /* 1870 * There is no group stop already in progress. We must 1871 * initiate one now. 1872 * 1873 * While ptraced, a task may be resumed while group stop is 1874 * still in effect and then receive a stop signal and 1875 * initiate another group stop. This deviates from the 1876 * usual behavior as two consecutive stop signals can't 1877 * cause two group stops when !ptraced. That is why we 1878 * also check !task_is_stopped(t) below. 1879 * 1880 * The condition can be distinguished by testing whether 1881 * SIGNAL_STOP_STOPPED is already set. Don't generate 1882 * group_exit_code in such case. 1883 * 1884 * This is not necessary for SIGNAL_STOP_CONTINUED because 1885 * an intervening stop signal is required to cause two 1886 * continued events regardless of ptrace. 1887 */ 1888 if (!(sig->flags & SIGNAL_STOP_STOPPED)) 1889 sig->group_exit_code = signr; 1890 else 1891 WARN_ON_ONCE(!task_ptrace(current)); 1892 1893 current->group_stop &= ~GROUP_STOP_SIGMASK; 1894 current->group_stop |= signr | gstop; 1895 sig->group_stop_count = 1; 1896 for (t = next_thread(current); t != current; 1897 t = next_thread(t)) { 1898 t->group_stop &= ~GROUP_STOP_SIGMASK; 1899 /* 1900 * Setting state to TASK_STOPPED for a group 1901 * stop is always done with the siglock held, 1902 * so this check has no races. 1903 */ 1904 if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { 1905 t->group_stop |= signr | gstop; 1906 sig->group_stop_count++; 1907 signal_wake_up(t, 0); 1908 } 1909 } 1910 } 1911 retry: 1912 if (likely(!task_ptrace(current))) { 1913 int notify = 0; 1914 1915 /* 1916 * If there are no other threads in the group, or if there 1917 * is a group stop in progress and we are the last to stop, 1918 * report to the parent. 1919 */ 1920 if (task_participate_group_stop(current)) 1921 notify = CLD_STOPPED; 1922 1923 __set_current_state(TASK_STOPPED); 1924 spin_unlock_irq(¤t->sighand->siglock); 1925 1926 /* 1927 * Notify the parent of the group stop completion. Because 1928 * we're not holding either the siglock or tasklist_lock 1929 * here, ptracer may attach inbetween; however, this is for 1930 * group stop and should always be delivered to the real 1931 * parent of the group leader. The new ptracer will get 1932 * its notification when this task transitions into 1933 * TASK_TRACED. 1934 */ 1935 if (notify) { 1936 read_lock(&tasklist_lock); 1937 do_notify_parent_cldstop(current, false, notify); 1938 read_unlock(&tasklist_lock); 1939 } 1940 1941 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 1942 schedule(); 1943 1944 spin_lock_irq(¤t->sighand->siglock); 1945 } else { 1946 ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, 1947 CLD_STOPPED, 0, NULL); 1948 current->exit_code = 0; 1949 } 1950 1951 /* 1952 * GROUP_STOP_PENDING could be set if another group stop has 1953 * started since being woken up or ptrace wants us to transit 1954 * between TASK_STOPPED and TRACED. Retry group stop. 1955 */ 1956 if (current->group_stop & GROUP_STOP_PENDING) { 1957 WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); 1958 goto retry; 1959 } 1960 1961 /* PTRACE_ATTACH might have raced with task killing, clear trapping */ 1962 task_clear_group_stop_trapping(current); 1963 1964 spin_unlock_irq(¤t->sighand->siglock); 1965 1966 tracehook_finish_jctl(); 1967 1968 return 1; 1969 } 1970 1971 static int ptrace_signal(int signr, siginfo_t *info, 1972 struct pt_regs *regs, void *cookie) 1973 { 1974 if (!task_ptrace(current)) 1975 return signr; 1976 1977 ptrace_signal_deliver(regs, cookie); 1978 1979 /* Let the debugger run. */ 1980 ptrace_stop(signr, CLD_TRAPPED, 0, info); 1981 1982 /* We're back. Did the debugger cancel the sig? */ 1983 signr = current->exit_code; 1984 if (signr == 0) 1985 return signr; 1986 1987 current->exit_code = 0; 1988 1989 /* 1990 * Update the siginfo structure if the signal has 1991 * changed. If the debugger wanted something 1992 * specific in the siginfo structure then it should 1993 * have updated *info via PTRACE_SETSIGINFO. 1994 */ 1995 if (signr != info->si_signo) { 1996 info->si_signo = signr; 1997 info->si_errno = 0; 1998 info->si_code = SI_USER; 1999 info->si_pid = task_pid_vnr(current->parent); 2000 info->si_uid = task_uid(current->parent); 2001 } 2002 2003 /* If the (new) signal is now blocked, requeue it. */ 2004 if (sigismember(¤t->blocked, signr)) { 2005 specific_send_sig_info(signr, info, current); 2006 signr = 0; 2007 } 2008 2009 return signr; 2010 } 2011 2012 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, 2013 struct pt_regs *regs, void *cookie) 2014 { 2015 struct sighand_struct *sighand = current->sighand; 2016 struct signal_struct *signal = current->signal; 2017 int signr; 2018 2019 relock: 2020 /* 2021 * We'll jump back here after any time we were stopped in TASK_STOPPED. 2022 * While in TASK_STOPPED, we were considered "frozen enough". 2023 * Now that we woke up, it's crucial if we're supposed to be 2024 * frozen that we freeze now before running anything substantial. 2025 */ 2026 try_to_freeze(); 2027 2028 spin_lock_irq(&sighand->siglock); 2029 /* 2030 * Every stopped thread goes here after wakeup. Check to see if 2031 * we should notify the parent, prepare_signal(SIGCONT) encodes 2032 * the CLD_ si_code into SIGNAL_CLD_MASK bits. 2033 */ 2034 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { 2035 struct task_struct *leader; 2036 int why; 2037 2038 if (signal->flags & SIGNAL_CLD_CONTINUED) 2039 why = CLD_CONTINUED; 2040 else 2041 why = CLD_STOPPED; 2042 2043 signal->flags &= ~SIGNAL_CLD_MASK; 2044 2045 spin_unlock_irq(&sighand->siglock); 2046 2047 /* 2048 * Notify the parent that we're continuing. This event is 2049 * always per-process and doesn't make whole lot of sense 2050 * for ptracers, who shouldn't consume the state via 2051 * wait(2) either, but, for backward compatibility, notify 2052 * the ptracer of the group leader too unless it's gonna be 2053 * a duplicate. 2054 */ 2055 read_lock(&tasklist_lock); 2056 2057 do_notify_parent_cldstop(current, false, why); 2058 2059 leader = current->group_leader; 2060 if (task_ptrace(leader) && !real_parent_is_ptracer(leader)) 2061 do_notify_parent_cldstop(leader, true, why); 2062 2063 read_unlock(&tasklist_lock); 2064 2065 goto relock; 2066 } 2067 2068 for (;;) { 2069 struct k_sigaction *ka; 2070 /* 2071 * Tracing can induce an artificial signal and choose sigaction. 2072 * The return value in @signr determines the default action, 2073 * but @info->si_signo is the signal number we will report. 2074 */ 2075 signr = tracehook_get_signal(current, regs, info, return_ka); 2076 if (unlikely(signr < 0)) 2077 goto relock; 2078 if (unlikely(signr != 0)) 2079 ka = return_ka; 2080 else { 2081 if (unlikely(current->group_stop & 2082 GROUP_STOP_PENDING) && do_signal_stop(0)) 2083 goto relock; 2084 2085 signr = dequeue_signal(current, ¤t->blocked, 2086 info); 2087 2088 if (!signr) 2089 break; /* will return 0 */ 2090 2091 if (signr != SIGKILL) { 2092 signr = ptrace_signal(signr, info, 2093 regs, cookie); 2094 if (!signr) 2095 continue; 2096 } 2097 2098 ka = &sighand->action[signr-1]; 2099 } 2100 2101 /* Trace actually delivered signals. */ 2102 trace_signal_deliver(signr, info, ka); 2103 2104 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 2105 continue; 2106 if (ka->sa.sa_handler != SIG_DFL) { 2107 /* Run the handler. */ 2108 *return_ka = *ka; 2109 2110 if (ka->sa.sa_flags & SA_ONESHOT) 2111 ka->sa.sa_handler = SIG_DFL; 2112 2113 break; /* will return non-zero "signr" value */ 2114 } 2115 2116 /* 2117 * Now we are doing the default action for this signal. 2118 */ 2119 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 2120 continue; 2121 2122 /* 2123 * Global init gets no signals it doesn't want. 2124 * Container-init gets no signals it doesn't want from same 2125 * container. 2126 * 2127 * Note that if global/container-init sees a sig_kernel_only() 2128 * signal here, the signal must have been generated internally 2129 * or must have come from an ancestor namespace. In either 2130 * case, the signal cannot be dropped. 2131 */ 2132 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && 2133 !sig_kernel_only(signr)) 2134 continue; 2135 2136 if (sig_kernel_stop(signr)) { 2137 /* 2138 * The default action is to stop all threads in 2139 * the thread group. The job control signals 2140 * do nothing in an orphaned pgrp, but SIGSTOP 2141 * always works. Note that siglock needs to be 2142 * dropped during the call to is_orphaned_pgrp() 2143 * because of lock ordering with tasklist_lock. 2144 * This allows an intervening SIGCONT to be posted. 2145 * We need to check for that and bail out if necessary. 2146 */ 2147 if (signr != SIGSTOP) { 2148 spin_unlock_irq(&sighand->siglock); 2149 2150 /* signals can be posted during this window */ 2151 2152 if (is_current_pgrp_orphaned()) 2153 goto relock; 2154 2155 spin_lock_irq(&sighand->siglock); 2156 } 2157 2158 if (likely(do_signal_stop(info->si_signo))) { 2159 /* It released the siglock. */ 2160 goto relock; 2161 } 2162 2163 /* 2164 * We didn't actually stop, due to a race 2165 * with SIGCONT or something like that. 2166 */ 2167 continue; 2168 } 2169 2170 spin_unlock_irq(&sighand->siglock); 2171 2172 /* 2173 * Anything else is fatal, maybe with a core dump. 2174 */ 2175 current->flags |= PF_SIGNALED; 2176 2177 if (sig_kernel_coredump(signr)) { 2178 if (print_fatal_signals) 2179 print_fatal_signal(regs, info->si_signo); 2180 /* 2181 * If it was able to dump core, this kills all 2182 * other threads in the group and synchronizes with 2183 * their demise. If we lost the race with another 2184 * thread getting here, it set group_exit_code 2185 * first and our do_group_exit call below will use 2186 * that value and ignore the one we pass it. 2187 */ 2188 do_coredump(info->si_signo, info->si_signo, regs); 2189 } 2190 2191 /* 2192 * Death signals, no core dump. 2193 */ 2194 do_group_exit(info->si_signo); 2195 /* NOTREACHED */ 2196 } 2197 spin_unlock_irq(&sighand->siglock); 2198 return signr; 2199 } 2200 2201 /* 2202 * It could be that complete_signal() picked us to notify about the 2203 * group-wide signal. Other threads should be notified now to take 2204 * the shared signals in @which since we will not. 2205 */ 2206 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) 2207 { 2208 sigset_t retarget; 2209 struct task_struct *t; 2210 2211 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); 2212 if (sigisemptyset(&retarget)) 2213 return; 2214 2215 t = tsk; 2216 while_each_thread(tsk, t) { 2217 if (t->flags & PF_EXITING) 2218 continue; 2219 2220 if (!has_pending_signals(&retarget, &t->blocked)) 2221 continue; 2222 /* Remove the signals this thread can handle. */ 2223 sigandsets(&retarget, &retarget, &t->blocked); 2224 2225 if (!signal_pending(t)) 2226 signal_wake_up(t, 0); 2227 2228 if (sigisemptyset(&retarget)) 2229 break; 2230 } 2231 } 2232 2233 void exit_signals(struct task_struct *tsk) 2234 { 2235 int group_stop = 0; 2236 sigset_t unblocked; 2237 2238 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { 2239 tsk->flags |= PF_EXITING; 2240 return; 2241 } 2242 2243 spin_lock_irq(&tsk->sighand->siglock); 2244 /* 2245 * From now this task is not visible for group-wide signals, 2246 * see wants_signal(), do_signal_stop(). 2247 */ 2248 tsk->flags |= PF_EXITING; 2249 if (!signal_pending(tsk)) 2250 goto out; 2251 2252 unblocked = tsk->blocked; 2253 signotset(&unblocked); 2254 retarget_shared_pending(tsk, &unblocked); 2255 2256 if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && 2257 task_participate_group_stop(tsk)) 2258 group_stop = CLD_STOPPED; 2259 out: 2260 spin_unlock_irq(&tsk->sighand->siglock); 2261 2262 /* 2263 * If group stop has completed, deliver the notification. This 2264 * should always go to the real parent of the group leader. 2265 */ 2266 if (unlikely(group_stop)) { 2267 read_lock(&tasklist_lock); 2268 do_notify_parent_cldstop(tsk, false, group_stop); 2269 read_unlock(&tasklist_lock); 2270 } 2271 } 2272 2273 EXPORT_SYMBOL(recalc_sigpending); 2274 EXPORT_SYMBOL_GPL(dequeue_signal); 2275 EXPORT_SYMBOL(flush_signals); 2276 EXPORT_SYMBOL(force_sig); 2277 EXPORT_SYMBOL(send_sig); 2278 EXPORT_SYMBOL(send_sig_info); 2279 EXPORT_SYMBOL(sigprocmask); 2280 EXPORT_SYMBOL(block_all_signals); 2281 EXPORT_SYMBOL(unblock_all_signals); 2282 2283 2284 /* 2285 * System call entry points. 2286 */ 2287 2288 /** 2289 * sys_restart_syscall - restart a system call 2290 */ 2291 SYSCALL_DEFINE0(restart_syscall) 2292 { 2293 struct restart_block *restart = ¤t_thread_info()->restart_block; 2294 return restart->fn(restart); 2295 } 2296 2297 long do_no_restart_syscall(struct restart_block *param) 2298 { 2299 return -EINTR; 2300 } 2301 2302 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) 2303 { 2304 if (signal_pending(tsk) && !thread_group_empty(tsk)) { 2305 sigset_t newblocked; 2306 /* A set of now blocked but previously unblocked signals. */ 2307 sigandnsets(&newblocked, newset, ¤t->blocked); 2308 retarget_shared_pending(tsk, &newblocked); 2309 } 2310 tsk->blocked = *newset; 2311 recalc_sigpending(); 2312 } 2313 2314 /** 2315 * set_current_blocked - change current->blocked mask 2316 * @newset: new mask 2317 * 2318 * It is wrong to change ->blocked directly, this helper should be used 2319 * to ensure the process can't miss a shared signal we are going to block. 2320 */ 2321 void set_current_blocked(const sigset_t *newset) 2322 { 2323 struct task_struct *tsk = current; 2324 2325 spin_lock_irq(&tsk->sighand->siglock); 2326 __set_task_blocked(tsk, newset); 2327 spin_unlock_irq(&tsk->sighand->siglock); 2328 } 2329 2330 /* 2331 * This is also useful for kernel threads that want to temporarily 2332 * (or permanently) block certain signals. 2333 * 2334 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel 2335 * interface happily blocks "unblockable" signals like SIGKILL 2336 * and friends. 2337 */ 2338 int sigprocmask(int how, sigset_t *set, sigset_t *oldset) 2339 { 2340 struct task_struct *tsk = current; 2341 sigset_t newset; 2342 2343 /* Lockless, only current can change ->blocked, never from irq */ 2344 if (oldset) 2345 *oldset = tsk->blocked; 2346 2347 switch (how) { 2348 case SIG_BLOCK: 2349 sigorsets(&newset, &tsk->blocked, set); 2350 break; 2351 case SIG_UNBLOCK: 2352 sigandnsets(&newset, &tsk->blocked, set); 2353 break; 2354 case SIG_SETMASK: 2355 newset = *set; 2356 break; 2357 default: 2358 return -EINVAL; 2359 } 2360 2361 set_current_blocked(&newset); 2362 return 0; 2363 } 2364 2365 /** 2366 * sys_rt_sigprocmask - change the list of currently blocked signals 2367 * @how: whether to add, remove, or set signals 2368 * @set: stores pending signals 2369 * @oset: previous value of signal mask if non-null 2370 * @sigsetsize: size of sigset_t type 2371 */ 2372 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, 2373 sigset_t __user *, oset, size_t, sigsetsize) 2374 { 2375 sigset_t old_set, new_set; 2376 int error; 2377 2378 /* XXX: Don't preclude handling different sized sigset_t's. */ 2379 if (sigsetsize != sizeof(sigset_t)) 2380 return -EINVAL; 2381 2382 old_set = current->blocked; 2383 2384 if (nset) { 2385 if (copy_from_user(&new_set, nset, sizeof(sigset_t))) 2386 return -EFAULT; 2387 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2388 2389 error = sigprocmask(how, &new_set, NULL); 2390 if (error) 2391 return error; 2392 } 2393 2394 if (oset) { 2395 if (copy_to_user(oset, &old_set, sizeof(sigset_t))) 2396 return -EFAULT; 2397 } 2398 2399 return 0; 2400 } 2401 2402 long do_sigpending(void __user *set, unsigned long sigsetsize) 2403 { 2404 long error = -EINVAL; 2405 sigset_t pending; 2406 2407 if (sigsetsize > sizeof(sigset_t)) 2408 goto out; 2409 2410 spin_lock_irq(¤t->sighand->siglock); 2411 sigorsets(&pending, ¤t->pending.signal, 2412 ¤t->signal->shared_pending.signal); 2413 spin_unlock_irq(¤t->sighand->siglock); 2414 2415 /* Outside the lock because only this thread touches it. */ 2416 sigandsets(&pending, ¤t->blocked, &pending); 2417 2418 error = -EFAULT; 2419 if (!copy_to_user(set, &pending, sigsetsize)) 2420 error = 0; 2421 2422 out: 2423 return error; 2424 } 2425 2426 /** 2427 * sys_rt_sigpending - examine a pending signal that has been raised 2428 * while blocked 2429 * @set: stores pending signals 2430 * @sigsetsize: size of sigset_t type or larger 2431 */ 2432 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) 2433 { 2434 return do_sigpending(set, sigsetsize); 2435 } 2436 2437 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER 2438 2439 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) 2440 { 2441 int err; 2442 2443 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) 2444 return -EFAULT; 2445 if (from->si_code < 0) 2446 return __copy_to_user(to, from, sizeof(siginfo_t)) 2447 ? -EFAULT : 0; 2448 /* 2449 * If you change siginfo_t structure, please be sure 2450 * this code is fixed accordingly. 2451 * Please remember to update the signalfd_copyinfo() function 2452 * inside fs/signalfd.c too, in case siginfo_t changes. 2453 * It should never copy any pad contained in the structure 2454 * to avoid security leaks, but must copy the generic 2455 * 3 ints plus the relevant union member. 2456 */ 2457 err = __put_user(from->si_signo, &to->si_signo); 2458 err |= __put_user(from->si_errno, &to->si_errno); 2459 err |= __put_user((short)from->si_code, &to->si_code); 2460 switch (from->si_code & __SI_MASK) { 2461 case __SI_KILL: 2462 err |= __put_user(from->si_pid, &to->si_pid); 2463 err |= __put_user(from->si_uid, &to->si_uid); 2464 break; 2465 case __SI_TIMER: 2466 err |= __put_user(from->si_tid, &to->si_tid); 2467 err |= __put_user(from->si_overrun, &to->si_overrun); 2468 err |= __put_user(from->si_ptr, &to->si_ptr); 2469 break; 2470 case __SI_POLL: 2471 err |= __put_user(from->si_band, &to->si_band); 2472 err |= __put_user(from->si_fd, &to->si_fd); 2473 break; 2474 case __SI_FAULT: 2475 err |= __put_user(from->si_addr, &to->si_addr); 2476 #ifdef __ARCH_SI_TRAPNO 2477 err |= __put_user(from->si_trapno, &to->si_trapno); 2478 #endif 2479 #ifdef BUS_MCEERR_AO 2480 /* 2481 * Other callers might not initialize the si_lsb field, 2482 * so check explicitly for the right codes here. 2483 */ 2484 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 2485 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 2486 #endif 2487 break; 2488 case __SI_CHLD: 2489 err |= __put_user(from->si_pid, &to->si_pid); 2490 err |= __put_user(from->si_uid, &to->si_uid); 2491 err |= __put_user(from->si_status, &to->si_status); 2492 err |= __put_user(from->si_utime, &to->si_utime); 2493 err |= __put_user(from->si_stime, &to->si_stime); 2494 break; 2495 case __SI_RT: /* This is not generated by the kernel as of now. */ 2496 case __SI_MESGQ: /* But this is */ 2497 err |= __put_user(from->si_pid, &to->si_pid); 2498 err |= __put_user(from->si_uid, &to->si_uid); 2499 err |= __put_user(from->si_ptr, &to->si_ptr); 2500 break; 2501 default: /* this is just in case for now ... */ 2502 err |= __put_user(from->si_pid, &to->si_pid); 2503 err |= __put_user(from->si_uid, &to->si_uid); 2504 break; 2505 } 2506 return err; 2507 } 2508 2509 #endif 2510 2511 /** 2512 * do_sigtimedwait - wait for queued signals specified in @which 2513 * @which: queued signals to wait for 2514 * @info: if non-null, the signal's siginfo is returned here 2515 * @ts: upper bound on process time suspension 2516 */ 2517 int do_sigtimedwait(const sigset_t *which, siginfo_t *info, 2518 const struct timespec *ts) 2519 { 2520 struct task_struct *tsk = current; 2521 long timeout = MAX_SCHEDULE_TIMEOUT; 2522 sigset_t mask = *which; 2523 int sig; 2524 2525 if (ts) { 2526 if (!timespec_valid(ts)) 2527 return -EINVAL; 2528 timeout = timespec_to_jiffies(ts); 2529 /* 2530 * We can be close to the next tick, add another one 2531 * to ensure we will wait at least the time asked for. 2532 */ 2533 if (ts->tv_sec || ts->tv_nsec) 2534 timeout++; 2535 } 2536 2537 /* 2538 * Invert the set of allowed signals to get those we want to block. 2539 */ 2540 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); 2541 signotset(&mask); 2542 2543 spin_lock_irq(&tsk->sighand->siglock); 2544 sig = dequeue_signal(tsk, &mask, info); 2545 if (!sig && timeout) { 2546 /* 2547 * None ready, temporarily unblock those we're interested 2548 * while we are sleeping in so that we'll be awakened when 2549 * they arrive. Unblocking is always fine, we can avoid 2550 * set_current_blocked(). 2551 */ 2552 tsk->real_blocked = tsk->blocked; 2553 sigandsets(&tsk->blocked, &tsk->blocked, &mask); 2554 recalc_sigpending(); 2555 spin_unlock_irq(&tsk->sighand->siglock); 2556 2557 timeout = schedule_timeout_interruptible(timeout); 2558 2559 spin_lock_irq(&tsk->sighand->siglock); 2560 __set_task_blocked(tsk, &tsk->real_blocked); 2561 siginitset(&tsk->real_blocked, 0); 2562 sig = dequeue_signal(tsk, &mask, info); 2563 } 2564 spin_unlock_irq(&tsk->sighand->siglock); 2565 2566 if (sig) 2567 return sig; 2568 return timeout ? -EINTR : -EAGAIN; 2569 } 2570 2571 /** 2572 * sys_rt_sigtimedwait - synchronously wait for queued signals specified 2573 * in @uthese 2574 * @uthese: queued signals to wait for 2575 * @uinfo: if non-null, the signal's siginfo is returned here 2576 * @uts: upper bound on process time suspension 2577 * @sigsetsize: size of sigset_t type 2578 */ 2579 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, 2580 siginfo_t __user *, uinfo, const struct timespec __user *, uts, 2581 size_t, sigsetsize) 2582 { 2583 sigset_t these; 2584 struct timespec ts; 2585 siginfo_t info; 2586 int ret; 2587 2588 /* XXX: Don't preclude handling different sized sigset_t's. */ 2589 if (sigsetsize != sizeof(sigset_t)) 2590 return -EINVAL; 2591 2592 if (copy_from_user(&these, uthese, sizeof(these))) 2593 return -EFAULT; 2594 2595 if (uts) { 2596 if (copy_from_user(&ts, uts, sizeof(ts))) 2597 return -EFAULT; 2598 } 2599 2600 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); 2601 2602 if (ret > 0 && uinfo) { 2603 if (copy_siginfo_to_user(uinfo, &info)) 2604 ret = -EFAULT; 2605 } 2606 2607 return ret; 2608 } 2609 2610 /** 2611 * sys_kill - send a signal to a process 2612 * @pid: the PID of the process 2613 * @sig: signal to be sent 2614 */ 2615 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) 2616 { 2617 struct siginfo info; 2618 2619 info.si_signo = sig; 2620 info.si_errno = 0; 2621 info.si_code = SI_USER; 2622 info.si_pid = task_tgid_vnr(current); 2623 info.si_uid = current_uid(); 2624 2625 return kill_something_info(sig, &info, pid); 2626 } 2627 2628 static int 2629 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) 2630 { 2631 struct task_struct *p; 2632 int error = -ESRCH; 2633 2634 rcu_read_lock(); 2635 p = find_task_by_vpid(pid); 2636 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { 2637 error = check_kill_permission(sig, info, p); 2638 /* 2639 * The null signal is a permissions and process existence 2640 * probe. No signal is actually delivered. 2641 */ 2642 if (!error && sig) { 2643 error = do_send_sig_info(sig, info, p, false); 2644 /* 2645 * If lock_task_sighand() failed we pretend the task 2646 * dies after receiving the signal. The window is tiny, 2647 * and the signal is private anyway. 2648 */ 2649 if (unlikely(error == -ESRCH)) 2650 error = 0; 2651 } 2652 } 2653 rcu_read_unlock(); 2654 2655 return error; 2656 } 2657 2658 static int do_tkill(pid_t tgid, pid_t pid, int sig) 2659 { 2660 struct siginfo info; 2661 2662 info.si_signo = sig; 2663 info.si_errno = 0; 2664 info.si_code = SI_TKILL; 2665 info.si_pid = task_tgid_vnr(current); 2666 info.si_uid = current_uid(); 2667 2668 return do_send_specific(tgid, pid, sig, &info); 2669 } 2670 2671 /** 2672 * sys_tgkill - send signal to one specific thread 2673 * @tgid: the thread group ID of the thread 2674 * @pid: the PID of the thread 2675 * @sig: signal to be sent 2676 * 2677 * This syscall also checks the @tgid and returns -ESRCH even if the PID 2678 * exists but it's not belonging to the target process anymore. This 2679 * method solves the problem of threads exiting and PIDs getting reused. 2680 */ 2681 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) 2682 { 2683 /* This is only valid for single tasks */ 2684 if (pid <= 0 || tgid <= 0) 2685 return -EINVAL; 2686 2687 return do_tkill(tgid, pid, sig); 2688 } 2689 2690 /** 2691 * sys_tkill - send signal to one specific task 2692 * @pid: the PID of the task 2693 * @sig: signal to be sent 2694 * 2695 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2696 */ 2697 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) 2698 { 2699 /* This is only valid for single tasks */ 2700 if (pid <= 0) 2701 return -EINVAL; 2702 2703 return do_tkill(0, pid, sig); 2704 } 2705 2706 /** 2707 * sys_rt_sigqueueinfo - send signal information to a signal 2708 * @pid: the PID of the thread 2709 * @sig: signal to be sent 2710 * @uinfo: signal info to be sent 2711 */ 2712 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, 2713 siginfo_t __user *, uinfo) 2714 { 2715 siginfo_t info; 2716 2717 if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) 2718 return -EFAULT; 2719 2720 /* Not even root can pretend to send signals from the kernel. 2721 * Nor can they impersonate a kill()/tgkill(), which adds source info. 2722 */ 2723 if (info.si_code >= 0 || info.si_code == SI_TKILL) { 2724 /* We used to allow any < 0 si_code */ 2725 WARN_ON_ONCE(info.si_code < 0); 2726 return -EPERM; 2727 } 2728 info.si_signo = sig; 2729 2730 /* POSIX.1b doesn't mention process groups. */ 2731 return kill_proc_info(sig, &info, pid); 2732 } 2733 2734 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) 2735 { 2736 /* This is only valid for single tasks */ 2737 if (pid <= 0 || tgid <= 0) 2738 return -EINVAL; 2739 2740 /* Not even root can pretend to send signals from the kernel. 2741 * Nor can they impersonate a kill()/tgkill(), which adds source info. 2742 */ 2743 if (info->si_code >= 0 || info->si_code == SI_TKILL) { 2744 /* We used to allow any < 0 si_code */ 2745 WARN_ON_ONCE(info->si_code < 0); 2746 return -EPERM; 2747 } 2748 info->si_signo = sig; 2749 2750 return do_send_specific(tgid, pid, sig, info); 2751 } 2752 2753 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, 2754 siginfo_t __user *, uinfo) 2755 { 2756 siginfo_t info; 2757 2758 if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) 2759 return -EFAULT; 2760 2761 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); 2762 } 2763 2764 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) 2765 { 2766 struct task_struct *t = current; 2767 struct k_sigaction *k; 2768 sigset_t mask; 2769 2770 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) 2771 return -EINVAL; 2772 2773 k = &t->sighand->action[sig-1]; 2774 2775 spin_lock_irq(¤t->sighand->siglock); 2776 if (oact) 2777 *oact = *k; 2778 2779 if (act) { 2780 sigdelsetmask(&act->sa.sa_mask, 2781 sigmask(SIGKILL) | sigmask(SIGSTOP)); 2782 *k = *act; 2783 /* 2784 * POSIX 3.3.1.3: 2785 * "Setting a signal action to SIG_IGN for a signal that is 2786 * pending shall cause the pending signal to be discarded, 2787 * whether or not it is blocked." 2788 * 2789 * "Setting a signal action to SIG_DFL for a signal that is 2790 * pending and whose default action is to ignore the signal 2791 * (for example, SIGCHLD), shall cause the pending signal to 2792 * be discarded, whether or not it is blocked" 2793 */ 2794 if (sig_handler_ignored(sig_handler(t, sig), sig)) { 2795 sigemptyset(&mask); 2796 sigaddset(&mask, sig); 2797 rm_from_queue_full(&mask, &t->signal->shared_pending); 2798 do { 2799 rm_from_queue_full(&mask, &t->pending); 2800 t = next_thread(t); 2801 } while (t != current); 2802 } 2803 } 2804 2805 spin_unlock_irq(¤t->sighand->siglock); 2806 return 0; 2807 } 2808 2809 int 2810 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) 2811 { 2812 stack_t oss; 2813 int error; 2814 2815 oss.ss_sp = (void __user *) current->sas_ss_sp; 2816 oss.ss_size = current->sas_ss_size; 2817 oss.ss_flags = sas_ss_flags(sp); 2818 2819 if (uss) { 2820 void __user *ss_sp; 2821 size_t ss_size; 2822 int ss_flags; 2823 2824 error = -EFAULT; 2825 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) 2826 goto out; 2827 error = __get_user(ss_sp, &uss->ss_sp) | 2828 __get_user(ss_flags, &uss->ss_flags) | 2829 __get_user(ss_size, &uss->ss_size); 2830 if (error) 2831 goto out; 2832 2833 error = -EPERM; 2834 if (on_sig_stack(sp)) 2835 goto out; 2836 2837 error = -EINVAL; 2838 /* 2839 * Note - this code used to test ss_flags incorrectly: 2840 * old code may have been written using ss_flags==0 2841 * to mean ss_flags==SS_ONSTACK (as this was the only 2842 * way that worked) - this fix preserves that older 2843 * mechanism. 2844 */ 2845 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) 2846 goto out; 2847 2848 if (ss_flags == SS_DISABLE) { 2849 ss_size = 0; 2850 ss_sp = NULL; 2851 } else { 2852 error = -ENOMEM; 2853 if (ss_size < MINSIGSTKSZ) 2854 goto out; 2855 } 2856 2857 current->sas_ss_sp = (unsigned long) ss_sp; 2858 current->sas_ss_size = ss_size; 2859 } 2860 2861 error = 0; 2862 if (uoss) { 2863 error = -EFAULT; 2864 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) 2865 goto out; 2866 error = __put_user(oss.ss_sp, &uoss->ss_sp) | 2867 __put_user(oss.ss_size, &uoss->ss_size) | 2868 __put_user(oss.ss_flags, &uoss->ss_flags); 2869 } 2870 2871 out: 2872 return error; 2873 } 2874 2875 #ifdef __ARCH_WANT_SYS_SIGPENDING 2876 2877 /** 2878 * sys_sigpending - examine pending signals 2879 * @set: where mask of pending signal is returned 2880 */ 2881 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) 2882 { 2883 return do_sigpending(set, sizeof(*set)); 2884 } 2885 2886 #endif 2887 2888 #ifdef __ARCH_WANT_SYS_SIGPROCMASK 2889 /** 2890 * sys_sigprocmask - examine and change blocked signals 2891 * @how: whether to add, remove, or set signals 2892 * @nset: signals to add or remove (if non-null) 2893 * @oset: previous value of signal mask if non-null 2894 * 2895 * Some platforms have their own version with special arguments; 2896 * others support only sys_rt_sigprocmask. 2897 */ 2898 2899 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, 2900 old_sigset_t __user *, oset) 2901 { 2902 old_sigset_t old_set, new_set; 2903 sigset_t new_blocked; 2904 2905 old_set = current->blocked.sig[0]; 2906 2907 if (nset) { 2908 if (copy_from_user(&new_set, nset, sizeof(*nset))) 2909 return -EFAULT; 2910 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); 2911 2912 new_blocked = current->blocked; 2913 2914 switch (how) { 2915 case SIG_BLOCK: 2916 sigaddsetmask(&new_blocked, new_set); 2917 break; 2918 case SIG_UNBLOCK: 2919 sigdelsetmask(&new_blocked, new_set); 2920 break; 2921 case SIG_SETMASK: 2922 new_blocked.sig[0] = new_set; 2923 break; 2924 default: 2925 return -EINVAL; 2926 } 2927 2928 set_current_blocked(&new_blocked); 2929 } 2930 2931 if (oset) { 2932 if (copy_to_user(oset, &old_set, sizeof(*oset))) 2933 return -EFAULT; 2934 } 2935 2936 return 0; 2937 } 2938 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ 2939 2940 #ifdef __ARCH_WANT_SYS_RT_SIGACTION 2941 /** 2942 * sys_rt_sigaction - alter an action taken by a process 2943 * @sig: signal to be sent 2944 * @act: new sigaction 2945 * @oact: used to save the previous sigaction 2946 * @sigsetsize: size of sigset_t type 2947 */ 2948 SYSCALL_DEFINE4(rt_sigaction, int, sig, 2949 const struct sigaction __user *, act, 2950 struct sigaction __user *, oact, 2951 size_t, sigsetsize) 2952 { 2953 struct k_sigaction new_sa, old_sa; 2954 int ret = -EINVAL; 2955 2956 /* XXX: Don't preclude handling different sized sigset_t's. */ 2957 if (sigsetsize != sizeof(sigset_t)) 2958 goto out; 2959 2960 if (act) { 2961 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) 2962 return -EFAULT; 2963 } 2964 2965 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); 2966 2967 if (!ret && oact) { 2968 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) 2969 return -EFAULT; 2970 } 2971 out: 2972 return ret; 2973 } 2974 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ 2975 2976 #ifdef __ARCH_WANT_SYS_SGETMASK 2977 2978 /* 2979 * For backwards compatibility. Functionality superseded by sigprocmask. 2980 */ 2981 SYSCALL_DEFINE0(sgetmask) 2982 { 2983 /* SMP safe */ 2984 return current->blocked.sig[0]; 2985 } 2986 2987 SYSCALL_DEFINE1(ssetmask, int, newmask) 2988 { 2989 int old; 2990 2991 spin_lock_irq(¤t->sighand->siglock); 2992 old = current->blocked.sig[0]; 2993 2994 siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)| 2995 sigmask(SIGSTOP))); 2996 recalc_sigpending(); 2997 spin_unlock_irq(¤t->sighand->siglock); 2998 2999 return old; 3000 } 3001 #endif /* __ARCH_WANT_SGETMASK */ 3002 3003 #ifdef __ARCH_WANT_SYS_SIGNAL 3004 /* 3005 * For backwards compatibility. Functionality superseded by sigaction. 3006 */ 3007 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) 3008 { 3009 struct k_sigaction new_sa, old_sa; 3010 int ret; 3011 3012 new_sa.sa.sa_handler = handler; 3013 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; 3014 sigemptyset(&new_sa.sa.sa_mask); 3015 3016 ret = do_sigaction(sig, &new_sa, &old_sa); 3017 3018 return ret ? ret : (unsigned long)old_sa.sa.sa_handler; 3019 } 3020 #endif /* __ARCH_WANT_SYS_SIGNAL */ 3021 3022 #ifdef __ARCH_WANT_SYS_PAUSE 3023 3024 SYSCALL_DEFINE0(pause) 3025 { 3026 while (!signal_pending(current)) { 3027 current->state = TASK_INTERRUPTIBLE; 3028 schedule(); 3029 } 3030 return -ERESTARTNOHAND; 3031 } 3032 3033 #endif 3034 3035 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND 3036 /** 3037 * sys_rt_sigsuspend - replace the signal mask for a value with the 3038 * @unewset value until a signal is received 3039 * @unewset: new signal mask value 3040 * @sigsetsize: size of sigset_t type 3041 */ 3042 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) 3043 { 3044 sigset_t newset; 3045 3046 /* XXX: Don't preclude handling different sized sigset_t's. */ 3047 if (sigsetsize != sizeof(sigset_t)) 3048 return -EINVAL; 3049 3050 if (copy_from_user(&newset, unewset, sizeof(newset))) 3051 return -EFAULT; 3052 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 3053 3054 spin_lock_irq(¤t->sighand->siglock); 3055 current->saved_sigmask = current->blocked; 3056 current->blocked = newset; 3057 recalc_sigpending(); 3058 spin_unlock_irq(¤t->sighand->siglock); 3059 3060 current->state = TASK_INTERRUPTIBLE; 3061 schedule(); 3062 set_restore_sigmask(); 3063 return -ERESTARTNOHAND; 3064 } 3065 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ 3066 3067 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) 3068 { 3069 return NULL; 3070 } 3071 3072 void __init signals_init(void) 3073 { 3074 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); 3075 } 3076 3077 #ifdef CONFIG_KGDB_KDB 3078 #include <linux/kdb.h> 3079 /* 3080 * kdb_send_sig_info - Allows kdb to send signals without exposing 3081 * signal internals. This function checks if the required locks are 3082 * available before calling the main signal code, to avoid kdb 3083 * deadlocks. 3084 */ 3085 void 3086 kdb_send_sig_info(struct task_struct *t, struct siginfo *info) 3087 { 3088 static struct task_struct *kdb_prev_t; 3089 int sig, new_t; 3090 if (!spin_trylock(&t->sighand->siglock)) { 3091 kdb_printf("Can't do kill command now.\n" 3092 "The sigmask lock is held somewhere else in " 3093 "kernel, try again later\n"); 3094 return; 3095 } 3096 spin_unlock(&t->sighand->siglock); 3097 new_t = kdb_prev_t != t; 3098 kdb_prev_t = t; 3099 if (t->state != TASK_RUNNING && new_t) { 3100 kdb_printf("Process is not RUNNING, sending a signal from " 3101 "kdb risks deadlock\n" 3102 "on the run queue locks. " 3103 "The signal has _not_ been sent.\n" 3104 "Reissue the kill command if you want to risk " 3105 "the deadlock.\n"); 3106 return; 3107 } 3108 sig = info->si_signo; 3109 if (send_sig_info(sig, info, t)) 3110 kdb_printf("Fail to deliver Signal %d to process %d.\n", 3111 sig, t->pid); 3112 else 3113 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); 3114 } 3115 #endif /* CONFIG_KGDB_KDB */ 3116