1 /* 2 * linux/kernel/signal.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 * 6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson 7 * 8 * 2003-06-02 Jim Houston - Concurrent Computer Corp. 9 * Changes to use preallocated sigqueue structures 10 * to allow signals to be sent reliably. 11 */ 12 13 #include <linux/slab.h> 14 #include <linux/module.h> 15 #include <linux/init.h> 16 #include <linux/sched.h> 17 #include <linux/fs.h> 18 #include <linux/tty.h> 19 #include <linux/binfmts.h> 20 #include <linux/security.h> 21 #include <linux/syscalls.h> 22 #include <linux/ptrace.h> 23 #include <linux/signal.h> 24 #include <linux/signalfd.h> 25 #include <linux/ratelimit.h> 26 #include <linux/tracehook.h> 27 #include <linux/capability.h> 28 #include <linux/freezer.h> 29 #include <linux/pid_namespace.h> 30 #include <linux/nsproxy.h> 31 #define CREATE_TRACE_POINTS 32 #include <trace/events/signal.h> 33 34 #include <asm/param.h> 35 #include <asm/uaccess.h> 36 #include <asm/unistd.h> 37 #include <asm/siginfo.h> 38 #include "audit.h" /* audit_signal_info() */ 39 40 /* 41 * SLAB caches for signal bits. 42 */ 43 44 static struct kmem_cache *sigqueue_cachep; 45 46 int print_fatal_signals __read_mostly; 47 48 static void __user *sig_handler(struct task_struct *t, int sig) 49 { 50 return t->sighand->action[sig - 1].sa.sa_handler; 51 } 52 53 static int sig_handler_ignored(void __user *handler, int sig) 54 { 55 /* Is it explicitly or implicitly ignored? */ 56 return handler == SIG_IGN || 57 (handler == SIG_DFL && sig_kernel_ignore(sig)); 58 } 59 60 static int sig_task_ignored(struct task_struct *t, int sig, 61 int from_ancestor_ns) 62 { 63 void __user *handler; 64 65 handler = sig_handler(t, sig); 66 67 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && 68 handler == SIG_DFL && !from_ancestor_ns) 69 return 1; 70 71 return sig_handler_ignored(handler, sig); 72 } 73 74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns) 75 { 76 /* 77 * Blocked signals are never ignored, since the 78 * signal handler may change by the time it is 79 * unblocked. 80 */ 81 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 82 return 0; 83 84 if (!sig_task_ignored(t, sig, from_ancestor_ns)) 85 return 0; 86 87 /* 88 * Tracers may want to know about even ignored signals. 89 */ 90 return !tracehook_consider_ignored_signal(t, sig); 91 } 92 93 /* 94 * Re-calculate pending state from the set of locally pending 95 * signals, globally pending signals, and blocked signals. 
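 *
 * Any path that changes ->blocked or the pending sets must re-run this
 * check; for example, sigprocmask() below calls recalc_sigpending()
 * after updating current->blocked.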
96 */ 97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) 98 { 99 unsigned long ready; 100 long i; 101 102 switch (_NSIG_WORDS) { 103 default: 104 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) 105 ready |= signal->sig[i] &~ blocked->sig[i]; 106 break; 107 108 case 4: ready = signal->sig[3] &~ blocked->sig[3]; 109 ready |= signal->sig[2] &~ blocked->sig[2]; 110 ready |= signal->sig[1] &~ blocked->sig[1]; 111 ready |= signal->sig[0] &~ blocked->sig[0]; 112 break; 113 114 case 2: ready = signal->sig[1] &~ blocked->sig[1]; 115 ready |= signal->sig[0] &~ blocked->sig[0]; 116 break; 117 118 case 1: ready = signal->sig[0] &~ blocked->sig[0]; 119 } 120 return ready != 0; 121 } 122 123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) 124 125 static int recalc_sigpending_tsk(struct task_struct *t) 126 { 127 if (t->signal->group_stop_count > 0 || 128 PENDING(&t->pending, &t->blocked) || 129 PENDING(&t->signal->shared_pending, &t->blocked)) { 130 set_tsk_thread_flag(t, TIF_SIGPENDING); 131 return 1; 132 } 133 /* 134 * We must never clear the flag in another thread, or in current 135 * when it's possible the current syscall is returning -ERESTART*. 136 * So we don't clear it here, and only callers who know they should do. 137 */ 138 return 0; 139 } 140 141 /* 142 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. 143 * This is superfluous when called on current, the wakeup is a harmless no-op. 144 */ 145 void recalc_sigpending_and_wake(struct task_struct *t) 146 { 147 if (recalc_sigpending_tsk(t)) 148 signal_wake_up(t, 0); 149 } 150 151 void recalc_sigpending(void) 152 { 153 if (unlikely(tracehook_force_sigpending())) 154 set_thread_flag(TIF_SIGPENDING); 155 else if (!recalc_sigpending_tsk(current) && !freezing(current)) 156 clear_thread_flag(TIF_SIGPENDING); 157 158 } 159 160 /* Given the mask, find the first available signal that should be serviced. */ 161 162 #define SYNCHRONOUS_MASK \ 163 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ 164 sigmask(SIGTRAP) | sigmask(SIGFPE)) 165 166 int next_signal(struct sigpending *pending, sigset_t *mask) 167 { 168 unsigned long i, *s, *m, x; 169 int sig = 0; 170 171 s = pending->signal.sig; 172 m = mask->sig; 173 174 /* 175 * Handle the first word specially: it contains the 176 * synchronous signals that need to be dequeued first. 
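 *
 * SYNCHRONOUS_MASK above covers SIGSEGV, SIGBUS, SIGILL, SIGTRAP and
 * SIGFPE; if any of those are pending and unblocked, they are picked
 * before the other signals in this word.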
177 */ 178 x = *s &~ *m; 179 if (x) { 180 if (x & SYNCHRONOUS_MASK) 181 x &= SYNCHRONOUS_MASK; 182 sig = ffz(~x) + 1; 183 return sig; 184 } 185 186 switch (_NSIG_WORDS) { 187 default: 188 for (i = 1; i < _NSIG_WORDS; ++i) { 189 x = *++s &~ *++m; 190 if (!x) 191 continue; 192 sig = ffz(~x) + i*_NSIG_BPW + 1; 193 break; 194 } 195 break; 196 197 case 2: 198 x = s[1] &~ m[1]; 199 if (!x) 200 break; 201 sig = ffz(~x) + _NSIG_BPW + 1; 202 break; 203 204 case 1: 205 /* Nothing to do */ 206 break; 207 } 208 209 return sig; 210 } 211 212 static inline void print_dropped_signal(int sig) 213 { 214 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); 215 216 if (!print_fatal_signals) 217 return; 218 219 if (!__ratelimit(&ratelimit_state)) 220 return; 221 222 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", 223 current->comm, current->pid, sig); 224 } 225 226 /* 227 * allocate a new signal queue record 228 * - this may be called without locks if and only if t == current, otherwise an 229 * appropriate lock must be held to stop the target task from exiting 230 */ 231 static struct sigqueue * 232 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) 233 { 234 struct sigqueue *q = NULL; 235 struct user_struct *user; 236 237 /* 238 * Protect access to @t credentials. This can go away when all 239 * callers hold rcu read lock. 240 */ 241 rcu_read_lock(); 242 user = get_uid(__task_cred(t)->user); 243 atomic_inc(&user->sigpending); 244 rcu_read_unlock(); 245 246 if (override_rlimit || 247 atomic_read(&user->sigpending) <= 248 task_rlimit(t, RLIMIT_SIGPENDING)) { 249 q = kmem_cache_alloc(sigqueue_cachep, flags); 250 } else { 251 print_dropped_signal(sig); 252 } 253 254 if (unlikely(q == NULL)) { 255 atomic_dec(&user->sigpending); 256 free_uid(user); 257 } else { 258 INIT_LIST_HEAD(&q->list); 259 q->flags = 0; 260 q->user = user; 261 } 262 263 return q; 264 } 265 266 static void __sigqueue_free(struct sigqueue *q) 267 { 268 if (q->flags & SIGQUEUE_PREALLOC) 269 return; 270 atomic_dec(&q->user->sigpending); 271 free_uid(q->user); 272 kmem_cache_free(sigqueue_cachep, q); 273 } 274 275 void flush_sigqueue(struct sigpending *queue) 276 { 277 struct sigqueue *q; 278 279 sigemptyset(&queue->signal); 280 while (!list_empty(&queue->list)) { 281 q = list_entry(queue->list.next, struct sigqueue , list); 282 list_del_init(&q->list); 283 __sigqueue_free(q); 284 } 285 } 286 287 /* 288 * Flush all pending signals for a task. 
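 *
 * Both the private and the shared pending queues are emptied and
 * TIF_SIGPENDING is cleared; flush_signals() below is the locked
 * wrapper that takes ->siglock around this.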
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}

/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon. If the notifier routine returns non-zero, then the
 * signal will be acted upon after all. If the notifier routine returns 0,
 * then the signal will be blocked. Only one block per process is
 * allowed. priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended.
*/ 400 401 void 402 unblock_all_signals(void) 403 { 404 unsigned long flags; 405 406 spin_lock_irqsave(¤t->sighand->siglock, flags); 407 current->notifier = NULL; 408 current->notifier_data = NULL; 409 recalc_sigpending(); 410 spin_unlock_irqrestore(¤t->sighand->siglock, flags); 411 } 412 413 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) 414 { 415 struct sigqueue *q, *first = NULL; 416 417 /* 418 * Collect the siginfo appropriate to this signal. Check if 419 * there is another siginfo for the same signal. 420 */ 421 list_for_each_entry(q, &list->list, list) { 422 if (q->info.si_signo == sig) { 423 if (first) 424 goto still_pending; 425 first = q; 426 } 427 } 428 429 sigdelset(&list->signal, sig); 430 431 if (first) { 432 still_pending: 433 list_del_init(&first->list); 434 copy_siginfo(info, &first->info); 435 __sigqueue_free(first); 436 } else { 437 /* 438 * Ok, it wasn't in the queue. This must be 439 * a fast-pathed signal or we must have been 440 * out of queue space. So zero out the info. 441 */ 442 info->si_signo = sig; 443 info->si_errno = 0; 444 info->si_code = SI_USER; 445 info->si_pid = 0; 446 info->si_uid = 0; 447 } 448 } 449 450 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 451 siginfo_t *info) 452 { 453 int sig = next_signal(pending, mask); 454 455 if (sig) { 456 if (current->notifier) { 457 if (sigismember(current->notifier_mask, sig)) { 458 if (!(current->notifier)(current->notifier_data)) { 459 clear_thread_flag(TIF_SIGPENDING); 460 return 0; 461 } 462 } 463 } 464 465 collect_signal(sig, pending, info); 466 } 467 468 return sig; 469 } 470 471 /* 472 * Dequeue a signal and return the element to the caller, which is 473 * expected to free it. 474 * 475 * All callers have to hold the siglock. 476 */ 477 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 478 { 479 int signr; 480 481 /* We only dequeue private signals from ourselves, we don't let 482 * signalfd steal them 483 */ 484 signr = __dequeue_signal(&tsk->pending, mask, info); 485 if (!signr) { 486 signr = __dequeue_signal(&tsk->signal->shared_pending, 487 mask, info); 488 /* 489 * itimer signal ? 490 * 491 * itimers are process shared and we restart periodic 492 * itimers in the signal delivery path to prevent DoS 493 * attacks in the high resolution timer case. This is 494 * compliant with the old way of self-restarting 495 * itimers, as the SIGALRM is a legacy signal and only 496 * queued once. Changing the restart behaviour to 497 * restart the timer in the signal dequeue path is 498 * reducing the timer noise on heavy loaded !highres 499 * systems too. 500 */ 501 if (unlikely(signr == SIGALRM)) { 502 struct hrtimer *tmr = &tsk->signal->real_timer; 503 504 if (!hrtimer_is_queued(tmr) && 505 tsk->signal->it_real_incr.tv64 != 0) { 506 hrtimer_forward(tmr, tmr->base->get_time(), 507 tsk->signal->it_real_incr); 508 hrtimer_restart(tmr); 509 } 510 } 511 } 512 513 recalc_sigpending(); 514 if (!signr) 515 return 0; 516 517 if (unlikely(sig_kernel_stop(signr))) { 518 /* 519 * Set a marker that we have dequeued a stop signal. Our 520 * caller might release the siglock and then the pending 521 * stop signal it is about to process is no longer in the 522 * pending bitmasks, but must still be cleared by a SIGCONT 523 * (and overruled by a SIGKILL). So those cases clear this 524 * shared flag after we've set it. Note that this flag may 525 * remain set after the signal we return is ignored or 526 * handled. 
That doesn't matter because its only purpose 527 * is to alert stop-signal processing code when another 528 * processor has come along and cleared the flag. 529 */ 530 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; 531 } 532 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 533 /* 534 * Release the siglock to ensure proper locking order 535 * of timer locks outside of siglocks. Note, we leave 536 * irqs disabled here, since the posix-timers code is 537 * about to disable them again anyway. 538 */ 539 spin_unlock(&tsk->sighand->siglock); 540 do_schedule_next_timer(info); 541 spin_lock(&tsk->sighand->siglock); 542 } 543 return signr; 544 } 545 546 /* 547 * Tell a process that it has a new active signal.. 548 * 549 * NOTE! we rely on the previous spin_lock to 550 * lock interrupts for us! We can only be called with 551 * "siglock" held, and the local interrupt must 552 * have been disabled when that got acquired! 553 * 554 * No need to set need_resched since signal event passing 555 * goes through ->blocked 556 */ 557 void signal_wake_up(struct task_struct *t, int resume) 558 { 559 unsigned int mask; 560 561 set_tsk_thread_flag(t, TIF_SIGPENDING); 562 563 /* 564 * For SIGKILL, we want to wake it up in the stopped/traced/killable 565 * case. We don't check t->state here because there is a race with it 566 * executing another processor and just now entering stopped state. 567 * By using wake_up_state, we ensure the process will wake up and 568 * handle its death signal. 569 */ 570 mask = TASK_INTERRUPTIBLE; 571 if (resume) 572 mask |= TASK_WAKEKILL; 573 if (!wake_up_state(t, mask)) 574 kick_process(t); 575 } 576 577 /* 578 * Remove signals in mask from the pending set and queue. 579 * Returns 1 if any signals were found. 580 * 581 * All callers must be holding the siglock. 582 * 583 * This version takes a sigset mask and looks at all signals, 584 * not just those in the first mask word. 585 */ 586 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) 587 { 588 struct sigqueue *q, *n; 589 sigset_t m; 590 591 sigandsets(&m, mask, &s->signal); 592 if (sigisemptyset(&m)) 593 return 0; 594 595 signandsets(&s->signal, &s->signal, mask); 596 list_for_each_entry_safe(q, n, &s->list, list) { 597 if (sigismember(mask, q->info.si_signo)) { 598 list_del_init(&q->list); 599 __sigqueue_free(q); 600 } 601 } 602 return 1; 603 } 604 /* 605 * Remove signals in mask from the pending set and queue. 606 * Returns 1 if any signals were found. 607 * 608 * All callers must be holding the siglock. 
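 *
 * For example, prepare_signal() below uses
 *	rm_from_queue(sigmask(SIGCONT), &t->pending);
 * to discard any queued SIGCONT when a stop signal is generated.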
609 */ 610 static int rm_from_queue(unsigned long mask, struct sigpending *s) 611 { 612 struct sigqueue *q, *n; 613 614 if (!sigtestsetmask(&s->signal, mask)) 615 return 0; 616 617 sigdelsetmask(&s->signal, mask); 618 list_for_each_entry_safe(q, n, &s->list, list) { 619 if (q->info.si_signo < SIGRTMIN && 620 (mask & sigmask(q->info.si_signo))) { 621 list_del_init(&q->list); 622 __sigqueue_free(q); 623 } 624 } 625 return 1; 626 } 627 628 static inline int is_si_special(const struct siginfo *info) 629 { 630 return info <= SEND_SIG_FORCED; 631 } 632 633 static inline bool si_fromuser(const struct siginfo *info) 634 { 635 return info == SEND_SIG_NOINFO || 636 (!is_si_special(info) && SI_FROMUSER(info)); 637 } 638 639 /* 640 * called with RCU read lock from check_kill_permission() 641 */ 642 static int kill_ok_by_cred(struct task_struct *t) 643 { 644 const struct cred *cred = current_cred(); 645 const struct cred *tcred = __task_cred(t); 646 647 if (cred->user->user_ns == tcred->user->user_ns && 648 (cred->euid == tcred->suid || 649 cred->euid == tcred->uid || 650 cred->uid == tcred->suid || 651 cred->uid == tcred->uid)) 652 return 1; 653 654 if (ns_capable(tcred->user->user_ns, CAP_KILL)) 655 return 1; 656 657 return 0; 658 } 659 660 /* 661 * Bad permissions for sending the signal 662 * - the caller must hold the RCU read lock 663 */ 664 static int check_kill_permission(int sig, struct siginfo *info, 665 struct task_struct *t) 666 { 667 struct pid *sid; 668 int error; 669 670 if (!valid_signal(sig)) 671 return -EINVAL; 672 673 if (!si_fromuser(info)) 674 return 0; 675 676 error = audit_signal_info(sig, t); /* Let audit system see the signal */ 677 if (error) 678 return error; 679 680 if (!same_thread_group(current, t) && 681 !kill_ok_by_cred(t)) { 682 switch (sig) { 683 case SIGCONT: 684 sid = task_session(t); 685 /* 686 * We don't return the error if sid == NULL. The 687 * task was unhashed, the caller must notice this. 688 */ 689 if (!sid || sid == task_session(current)) 690 break; 691 default: 692 return -EPERM; 693 } 694 } 695 696 return security_task_kill(t, info, sig, 0); 697 } 698 699 /* 700 * Handle magic process-wide effects of stop/continue signals. Unlike 701 * the signal actions, these happen immediately at signal-generation 702 * time regardless of blocking, ignoring, or handling. This does the 703 * actual continuing for SIGCONT, but not the actual stopping for stop 704 * signals. The process stop is done as a signal action for SIG_DFL. 705 * 706 * Returns true if the signal should be actually delivered, otherwise 707 * it should be dropped. 708 */ 709 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) 710 { 711 struct signal_struct *signal = p->signal; 712 struct task_struct *t; 713 714 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { 715 /* 716 * The process is in the middle of dying, nothing to do. 717 */ 718 } else if (sig_kernel_stop(sig)) { 719 /* 720 * This is a stop signal. Remove SIGCONT from all queues. 721 */ 722 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); 723 t = p; 724 do { 725 rm_from_queue(sigmask(SIGCONT), &t->pending); 726 } while_each_thread(p, t); 727 } else if (sig == SIGCONT) { 728 unsigned int why; 729 /* 730 * Remove all stop signals from all queues, 731 * and wake all threads. 
732 */ 733 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); 734 t = p; 735 do { 736 unsigned int state; 737 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); 738 /* 739 * If there is a handler for SIGCONT, we must make 740 * sure that no thread returns to user mode before 741 * we post the signal, in case it was the only 742 * thread eligible to run the signal handler--then 743 * it must not do anything between resuming and 744 * running the handler. With the TIF_SIGPENDING 745 * flag set, the thread will pause and acquire the 746 * siglock that we hold now and until we've queued 747 * the pending signal. 748 * 749 * Wake up the stopped thread _after_ setting 750 * TIF_SIGPENDING 751 */ 752 state = __TASK_STOPPED; 753 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { 754 set_tsk_thread_flag(t, TIF_SIGPENDING); 755 state |= TASK_INTERRUPTIBLE; 756 } 757 wake_up_state(t, state); 758 } while_each_thread(p, t); 759 760 /* 761 * Notify the parent with CLD_CONTINUED if we were stopped. 762 * 763 * If we were in the middle of a group stop, we pretend it 764 * was already finished, and then continued. Since SIGCHLD 765 * doesn't queue we report only CLD_STOPPED, as if the next 766 * CLD_CONTINUED was dropped. 767 */ 768 why = 0; 769 if (signal->flags & SIGNAL_STOP_STOPPED) 770 why |= SIGNAL_CLD_CONTINUED; 771 else if (signal->group_stop_count) 772 why |= SIGNAL_CLD_STOPPED; 773 774 if (why) { 775 /* 776 * The first thread which returns from do_signal_stop() 777 * will take ->siglock, notice SIGNAL_CLD_MASK, and 778 * notify its parent. See get_signal_to_deliver(). 779 */ 780 signal->flags = why | SIGNAL_STOP_CONTINUED; 781 signal->group_stop_count = 0; 782 signal->group_exit_code = 0; 783 } else { 784 /* 785 * We are not stopped, but there could be a stop 786 * signal in the middle of being processed after 787 * being removed from the queue. Clear that too. 788 */ 789 signal->flags &= ~SIGNAL_STOP_DEQUEUED; 790 } 791 } 792 793 return !sig_ignored(p, sig, from_ancestor_ns); 794 } 795 796 /* 797 * Test if P wants to take SIG. After we've checked all threads with this, 798 * it's equivalent to finding no threads not blocking SIG. Any threads not 799 * blocking SIG were ruled out because they are not running and already 800 * have pending signals. Such threads will dequeue from the shared queue 801 * as soon as they're available, so putting the signal on the shared queue 802 * will be equivalent to sending it to one such thread. 803 */ 804 static inline int wants_signal(int sig, struct task_struct *p) 805 { 806 if (sigismember(&p->blocked, sig)) 807 return 0; 808 if (p->flags & PF_EXITING) 809 return 0; 810 if (sig == SIGKILL) 811 return 1; 812 if (task_is_stopped_or_traced(p)) 813 return 0; 814 return task_curr(p) || !signal_pending(p); 815 } 816 817 static void complete_signal(int sig, struct task_struct *p, int group) 818 { 819 struct signal_struct *signal = p->signal; 820 struct task_struct *t; 821 822 /* 823 * Now find a thread we can wake up to take the signal off the queue. 824 * 825 * If the main thread wants the signal, it gets first crack. 826 * Probably the least surprising to the average bear. 827 */ 828 if (wants_signal(sig, p)) 829 t = p; 830 else if (!group || thread_group_empty(p)) 831 /* 832 * There is just one thread and it does not need to be woken. 833 * It will dequeue unblocked signals before it runs again. 834 */ 835 return; 836 else { 837 /* 838 * Otherwise try to find a suitable thread. 
839 */ 840 t = signal->curr_target; 841 while (!wants_signal(sig, t)) { 842 t = next_thread(t); 843 if (t == signal->curr_target) 844 /* 845 * No thread needs to be woken. 846 * Any eligible threads will see 847 * the signal in the queue soon. 848 */ 849 return; 850 } 851 signal->curr_target = t; 852 } 853 854 /* 855 * Found a killable thread. If the signal will be fatal, 856 * then start taking the whole group down immediately. 857 */ 858 if (sig_fatal(p, sig) && 859 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && 860 !sigismember(&t->real_blocked, sig) && 861 (sig == SIGKILL || 862 !tracehook_consider_fatal_signal(t, sig))) { 863 /* 864 * This signal will be fatal to the whole group. 865 */ 866 if (!sig_kernel_coredump(sig)) { 867 /* 868 * Start a group exit and wake everybody up. 869 * This way we don't have other threads 870 * running and doing things after a slower 871 * thread has the fatal signal pending. 872 */ 873 signal->flags = SIGNAL_GROUP_EXIT; 874 signal->group_exit_code = sig; 875 signal->group_stop_count = 0; 876 t = p; 877 do { 878 sigaddset(&t->pending.signal, SIGKILL); 879 signal_wake_up(t, 1); 880 } while_each_thread(p, t); 881 return; 882 } 883 } 884 885 /* 886 * The signal is already in the shared-pending queue. 887 * Tell the chosen thread to wake up and dequeue it. 888 */ 889 signal_wake_up(t, sig == SIGKILL); 890 return; 891 } 892 893 static inline int legacy_queue(struct sigpending *signals, int sig) 894 { 895 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); 896 } 897 898 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, 899 int group, int from_ancestor_ns) 900 { 901 struct sigpending *pending; 902 struct sigqueue *q; 903 int override_rlimit; 904 905 trace_signal_generate(sig, info, t); 906 907 assert_spin_locked(&t->sighand->siglock); 908 909 if (!prepare_signal(sig, t, from_ancestor_ns)) 910 return 0; 911 912 pending = group ? &t->signal->shared_pending : &t->pending; 913 /* 914 * Short-circuit ignored signals and support queuing 915 * exactly one non-rt signal, so that we can get more 916 * detailed information about the cause of the signal. 917 */ 918 if (legacy_queue(pending, sig)) 919 return 0; 920 /* 921 * fast-pathed signals for kernel-internal things like SIGSTOP 922 * or SIGKILL. 923 */ 924 if (info == SEND_SIG_FORCED) 925 goto out_set; 926 927 /* 928 * Real-time signals must be queued if sent by sigqueue, or 929 * some other real-time mechanism. It is implementation 930 * defined whether kill() does so. We attempt to do so, on 931 * the principle of least surprise, but since kill is not 932 * allowed to fail with EAGAIN when low on memory we just 933 * make sure at least one signal gets delivered and don't 934 * pass on the info struct. 
935 */ 936 if (sig < SIGRTMIN) 937 override_rlimit = (is_si_special(info) || info->si_code >= 0); 938 else 939 override_rlimit = 0; 940 941 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, 942 override_rlimit); 943 if (q) { 944 list_add_tail(&q->list, &pending->list); 945 switch ((unsigned long) info) { 946 case (unsigned long) SEND_SIG_NOINFO: 947 q->info.si_signo = sig; 948 q->info.si_errno = 0; 949 q->info.si_code = SI_USER; 950 q->info.si_pid = task_tgid_nr_ns(current, 951 task_active_pid_ns(t)); 952 q->info.si_uid = current_uid(); 953 break; 954 case (unsigned long) SEND_SIG_PRIV: 955 q->info.si_signo = sig; 956 q->info.si_errno = 0; 957 q->info.si_code = SI_KERNEL; 958 q->info.si_pid = 0; 959 q->info.si_uid = 0; 960 break; 961 default: 962 copy_siginfo(&q->info, info); 963 if (from_ancestor_ns) 964 q->info.si_pid = 0; 965 break; 966 } 967 } else if (!is_si_special(info)) { 968 if (sig >= SIGRTMIN && info->si_code != SI_USER) { 969 /* 970 * Queue overflow, abort. We may abort if the 971 * signal was rt and sent by user using something 972 * other than kill(). 973 */ 974 trace_signal_overflow_fail(sig, group, info); 975 return -EAGAIN; 976 } else { 977 /* 978 * This is a silent loss of information. We still 979 * send the signal, but the *info bits are lost. 980 */ 981 trace_signal_lose_info(sig, group, info); 982 } 983 } 984 985 out_set: 986 signalfd_notify(t, sig); 987 sigaddset(&pending->signal, sig); 988 complete_signal(sig, t, group); 989 return 0; 990 } 991 992 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, 993 int group) 994 { 995 int from_ancestor_ns = 0; 996 997 #ifdef CONFIG_PID_NS 998 from_ancestor_ns = si_fromuser(info) && 999 !task_pid_nr_ns(current, task_active_pid_ns(t)); 1000 #endif 1001 1002 return __send_signal(sig, info, t, group, from_ancestor_ns); 1003 } 1004 1005 static void print_fatal_signal(struct pt_regs *regs, int signr) 1006 { 1007 printk("%s/%d: potentially unexpected fatal signal %d.\n", 1008 current->comm, task_pid_nr(current), signr); 1009 1010 #if defined(__i386__) && !defined(__arch_um__) 1011 printk("code at %08lx: ", regs->ip); 1012 { 1013 int i; 1014 for (i = 0; i < 16; i++) { 1015 unsigned char insn; 1016 1017 if (get_user(insn, (unsigned char *)(regs->ip + i))) 1018 break; 1019 printk("%02x ", insn); 1020 } 1021 } 1022 #endif 1023 printk("\n"); 1024 preempt_disable(); 1025 show_regs(regs); 1026 preempt_enable(); 1027 } 1028 1029 static int __init setup_print_fatal_signals(char *str) 1030 { 1031 get_option (&str, &print_fatal_signals); 1032 1033 return 1; 1034 } 1035 1036 __setup("print-fatal-signals=", setup_print_fatal_signals); 1037 1038 int 1039 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1040 { 1041 return send_signal(sig, info, p, 1); 1042 } 1043 1044 static int 1045 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) 1046 { 1047 return send_signal(sig, info, t, 0); 1048 } 1049 1050 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, 1051 bool group) 1052 { 1053 unsigned long flags; 1054 int ret = -ESRCH; 1055 1056 if (lock_task_sighand(p, &flags)) { 1057 ret = send_signal(sig, info, p, group); 1058 unlock_task_sighand(p, &flags); 1059 } 1060 1061 return ret; 1062 } 1063 1064 /* 1065 * Force a signal that the process can't ignore: if necessary 1066 * we unblock the signal and change any SIG_IGN to SIG_DFL. 
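 * force_sigsegv() below, for example, relies on this to deliver SIGSEGV
 * even when the task has blocked or ignored it.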
1067 * 1068 * Note: If we unblock the signal, we always reset it to SIG_DFL, 1069 * since we do not want to have a signal handler that was blocked 1070 * be invoked when user space had explicitly blocked it. 1071 * 1072 * We don't want to have recursive SIGSEGV's etc, for example, 1073 * that is why we also clear SIGNAL_UNKILLABLE. 1074 */ 1075 int 1076 force_sig_info(int sig, struct siginfo *info, struct task_struct *t) 1077 { 1078 unsigned long int flags; 1079 int ret, blocked, ignored; 1080 struct k_sigaction *action; 1081 1082 spin_lock_irqsave(&t->sighand->siglock, flags); 1083 action = &t->sighand->action[sig-1]; 1084 ignored = action->sa.sa_handler == SIG_IGN; 1085 blocked = sigismember(&t->blocked, sig); 1086 if (blocked || ignored) { 1087 action->sa.sa_handler = SIG_DFL; 1088 if (blocked) { 1089 sigdelset(&t->blocked, sig); 1090 recalc_sigpending_and_wake(t); 1091 } 1092 } 1093 if (action->sa.sa_handler == SIG_DFL) 1094 t->signal->flags &= ~SIGNAL_UNKILLABLE; 1095 ret = specific_send_sig_info(sig, info, t); 1096 spin_unlock_irqrestore(&t->sighand->siglock, flags); 1097 1098 return ret; 1099 } 1100 1101 /* 1102 * Nuke all other threads in the group. 1103 */ 1104 int zap_other_threads(struct task_struct *p) 1105 { 1106 struct task_struct *t = p; 1107 int count = 0; 1108 1109 p->signal->group_stop_count = 0; 1110 1111 while_each_thread(p, t) { 1112 count++; 1113 1114 /* Don't bother with already dead threads */ 1115 if (t->exit_state) 1116 continue; 1117 sigaddset(&t->pending.signal, SIGKILL); 1118 signal_wake_up(t, 1); 1119 } 1120 1121 return count; 1122 } 1123 1124 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, 1125 unsigned long *flags) 1126 { 1127 struct sighand_struct *sighand; 1128 1129 rcu_read_lock(); 1130 for (;;) { 1131 sighand = rcu_dereference(tsk->sighand); 1132 if (unlikely(sighand == NULL)) 1133 break; 1134 1135 spin_lock_irqsave(&sighand->siglock, *flags); 1136 if (likely(sighand == tsk->sighand)) 1137 break; 1138 spin_unlock_irqrestore(&sighand->siglock, *flags); 1139 } 1140 rcu_read_unlock(); 1141 1142 return sighand; 1143 } 1144 1145 /* 1146 * send signal info to all the members of a group 1147 */ 1148 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1149 { 1150 int ret; 1151 1152 rcu_read_lock(); 1153 ret = check_kill_permission(sig, info, p); 1154 rcu_read_unlock(); 1155 1156 if (!ret && sig) 1157 ret = do_send_sig_info(sig, info, p, true); 1158 1159 return ret; 1160 } 1161 1162 /* 1163 * __kill_pgrp_info() sends a signal to a process group: this is what the tty 1164 * control characters do (^C, ^Z etc) 1165 * - the caller must hold at least a readlock on tasklist_lock 1166 */ 1167 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) 1168 { 1169 struct task_struct *p = NULL; 1170 int retval, success; 1171 1172 success = 0; 1173 retval = -ESRCH; 1174 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 1175 int err = group_send_sig_info(sig, info, p); 1176 success |= !err; 1177 retval = err; 1178 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 1179 return success ? 0 : retval; 1180 } 1181 1182 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) 1183 { 1184 int error = -ESRCH; 1185 struct task_struct *p; 1186 1187 rcu_read_lock(); 1188 retry: 1189 p = pid_task(pid, PIDTYPE_PID); 1190 if (p) { 1191 error = group_send_sig_info(sig, info, p); 1192 if (unlikely(error == -ESRCH)) 1193 /* 1194 * The task was unhashed in between, try again. 
1195 * If it is dead, pid_task() will return NULL, 1196 * if we race with de_thread() it will find the 1197 * new leader. 1198 */ 1199 goto retry; 1200 } 1201 rcu_read_unlock(); 1202 1203 return error; 1204 } 1205 1206 int kill_proc_info(int sig, struct siginfo *info, pid_t pid) 1207 { 1208 int error; 1209 rcu_read_lock(); 1210 error = kill_pid_info(sig, info, find_vpid(pid)); 1211 rcu_read_unlock(); 1212 return error; 1213 } 1214 1215 /* like kill_pid_info(), but doesn't use uid/euid of "current" */ 1216 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, 1217 uid_t uid, uid_t euid, u32 secid) 1218 { 1219 int ret = -EINVAL; 1220 struct task_struct *p; 1221 const struct cred *pcred; 1222 unsigned long flags; 1223 1224 if (!valid_signal(sig)) 1225 return ret; 1226 1227 rcu_read_lock(); 1228 p = pid_task(pid, PIDTYPE_PID); 1229 if (!p) { 1230 ret = -ESRCH; 1231 goto out_unlock; 1232 } 1233 pcred = __task_cred(p); 1234 if (si_fromuser(info) && 1235 euid != pcred->suid && euid != pcred->uid && 1236 uid != pcred->suid && uid != pcred->uid) { 1237 ret = -EPERM; 1238 goto out_unlock; 1239 } 1240 ret = security_task_kill(p, info, sig, secid); 1241 if (ret) 1242 goto out_unlock; 1243 1244 if (sig) { 1245 if (lock_task_sighand(p, &flags)) { 1246 ret = __send_signal(sig, info, p, 1, 0); 1247 unlock_task_sighand(p, &flags); 1248 } else 1249 ret = -ESRCH; 1250 } 1251 out_unlock: 1252 rcu_read_unlock(); 1253 return ret; 1254 } 1255 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); 1256 1257 /* 1258 * kill_something_info() interprets pid in interesting ways just like kill(2). 1259 * 1260 * POSIX specifies that kill(-1,sig) is unspecified, but what we have 1261 * is probably wrong. Should make it like BSD or SYSV. 1262 */ 1263 1264 static int kill_something_info(int sig, struct siginfo *info, pid_t pid) 1265 { 1266 int ret; 1267 1268 if (pid > 0) { 1269 rcu_read_lock(); 1270 ret = kill_pid_info(sig, info, find_vpid(pid)); 1271 rcu_read_unlock(); 1272 return ret; 1273 } 1274 1275 read_lock(&tasklist_lock); 1276 if (pid != -1) { 1277 ret = __kill_pgrp_info(sig, info, 1278 pid ? find_vpid(-pid) : task_pgrp(current)); 1279 } else { 1280 int retval = 0, count = 0; 1281 struct task_struct * p; 1282 1283 for_each_process(p) { 1284 if (task_pid_vnr(p) > 1 && 1285 !same_thread_group(p, current)) { 1286 int err = group_send_sig_info(sig, info, p); 1287 ++count; 1288 if (err != -EPERM) 1289 retval = err; 1290 } 1291 } 1292 ret = count ? retval : -ESRCH; 1293 } 1294 read_unlock(&tasklist_lock); 1295 1296 return ret; 1297 } 1298 1299 /* 1300 * These are for backward compatibility with the rest of the kernel source. 1301 */ 1302 1303 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1304 { 1305 /* 1306 * Make sure legacy kernel users don't send in bad values 1307 * (normal paths check this in check_kill_permission). 1308 */ 1309 if (!valid_signal(sig)) 1310 return -EINVAL; 1311 1312 return do_send_sig_info(sig, info, p, false); 1313 } 1314 1315 #define __si_special(priv) \ 1316 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) 1317 1318 int 1319 send_sig(int sig, struct task_struct *p, int priv) 1320 { 1321 return send_sig_info(sig, __si_special(priv), p); 1322 } 1323 1324 void 1325 force_sig(int sig, struct task_struct *p) 1326 { 1327 force_sig_info(sig, SEND_SIG_PRIV, p); 1328 } 1329 1330 /* 1331 * When things go south during signal handling, we 1332 * will force a SIGSEGV. 
 * And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from timer_create(). If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here, so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we will always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable(), which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
1526 */ 1527 ret = tsk->exit_signal = -1; 1528 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1529 sig = -1; 1530 } 1531 if (valid_signal(sig) && sig > 0) 1532 __group_send_sig_info(sig, &info, tsk->parent); 1533 __wake_up_parent(tsk, tsk->parent); 1534 spin_unlock_irqrestore(&psig->siglock, flags); 1535 1536 return ret; 1537 } 1538 1539 static void do_notify_parent_cldstop(struct task_struct *tsk, int why) 1540 { 1541 struct siginfo info; 1542 unsigned long flags; 1543 struct task_struct *parent; 1544 struct sighand_struct *sighand; 1545 1546 if (task_ptrace(tsk)) 1547 parent = tsk->parent; 1548 else { 1549 tsk = tsk->group_leader; 1550 parent = tsk->real_parent; 1551 } 1552 1553 info.si_signo = SIGCHLD; 1554 info.si_errno = 0; 1555 /* 1556 * see comment in do_notify_parent() about the following 4 lines 1557 */ 1558 rcu_read_lock(); 1559 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); 1560 info.si_uid = __task_cred(tsk)->uid; 1561 rcu_read_unlock(); 1562 1563 info.si_utime = cputime_to_clock_t(tsk->utime); 1564 info.si_stime = cputime_to_clock_t(tsk->stime); 1565 1566 info.si_code = why; 1567 switch (why) { 1568 case CLD_CONTINUED: 1569 info.si_status = SIGCONT; 1570 break; 1571 case CLD_STOPPED: 1572 info.si_status = tsk->signal->group_exit_code & 0x7f; 1573 break; 1574 case CLD_TRAPPED: 1575 info.si_status = tsk->exit_code & 0x7f; 1576 break; 1577 default: 1578 BUG(); 1579 } 1580 1581 sighand = parent->sighand; 1582 spin_lock_irqsave(&sighand->siglock, flags); 1583 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && 1584 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) 1585 __group_send_sig_info(SIGCHLD, &info, parent); 1586 /* 1587 * Even if SIGCHLD is not generated, we must wake up wait4 calls. 1588 */ 1589 __wake_up_parent(tsk, parent); 1590 spin_unlock_irqrestore(&sighand->siglock, flags); 1591 } 1592 1593 static inline int may_ptrace_stop(void) 1594 { 1595 if (!likely(task_ptrace(current))) 1596 return 0; 1597 /* 1598 * Are we in the middle of do_coredump? 1599 * If so and our tracer is also part of the coredump stopping 1600 * is a deadlock situation, and pointless because our tracer 1601 * is dead so don't allow us to stop. 1602 * If SIGKILL was already sent before the caller unlocked 1603 * ->siglock we must see ->core_state != NULL. Otherwise it 1604 * is safe to enter schedule(). 1605 */ 1606 if (unlikely(current->mm->core_state) && 1607 unlikely(current->mm == current->parent->mm)) 1608 return 0; 1609 1610 return 1; 1611 } 1612 1613 /* 1614 * Return non-zero if there is a SIGKILL that should be waking us up. 1615 * Called with the siglock held. 1616 */ 1617 static int sigkill_pending(struct task_struct *tsk) 1618 { 1619 return sigismember(&tsk->pending.signal, SIGKILL) || 1620 sigismember(&tsk->signal->shared_pending.signal, SIGKILL); 1621 } 1622 1623 /* 1624 * This must be called with current->sighand->siglock held. 1625 * 1626 * This should be the path for all ptrace stops. 1627 * We always set current->last_siginfo while stopped here. 1628 * That makes it a way to test a stopped process for 1629 * being ptrace-stopped vs being job-control-stopped. 1630 * 1631 * If we actually decide not to stop at all because the tracer 1632 * is gone, we keep current->exit_code unless clear_code. 
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run. */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run. */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns non-zero if we've actually stopped and released the siglock.
1739 * Returns zero if we didn't stop and still hold the siglock. 1740 */ 1741 static int do_signal_stop(int signr) 1742 { 1743 struct signal_struct *sig = current->signal; 1744 int notify; 1745 1746 if (!sig->group_stop_count) { 1747 struct task_struct *t; 1748 1749 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || 1750 unlikely(signal_group_exit(sig))) 1751 return 0; 1752 /* 1753 * There is no group stop already in progress. 1754 * We must initiate one now. 1755 */ 1756 sig->group_exit_code = signr; 1757 1758 sig->group_stop_count = 1; 1759 for (t = next_thread(current); t != current; t = next_thread(t)) 1760 /* 1761 * Setting state to TASK_STOPPED for a group 1762 * stop is always done with the siglock held, 1763 * so this check has no races. 1764 */ 1765 if (!(t->flags & PF_EXITING) && 1766 !task_is_stopped_or_traced(t)) { 1767 sig->group_stop_count++; 1768 signal_wake_up(t, 0); 1769 } 1770 } 1771 /* 1772 * If there are no other threads in the group, or if there is 1773 * a group stop in progress and we are the last to stop, report 1774 * to the parent. When ptraced, every thread reports itself. 1775 */ 1776 notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; 1777 notify = tracehook_notify_jctl(notify, CLD_STOPPED); 1778 /* 1779 * tracehook_notify_jctl() can drop and reacquire siglock, so 1780 * we keep ->group_stop_count != 0 before the call. If SIGCONT 1781 * or SIGKILL comes in between ->group_stop_count == 0. 1782 */ 1783 if (sig->group_stop_count) { 1784 if (!--sig->group_stop_count) 1785 sig->flags = SIGNAL_STOP_STOPPED; 1786 current->exit_code = sig->group_exit_code; 1787 __set_current_state(TASK_STOPPED); 1788 } 1789 spin_unlock_irq(¤t->sighand->siglock); 1790 1791 if (notify) { 1792 read_lock(&tasklist_lock); 1793 do_notify_parent_cldstop(current, notify); 1794 read_unlock(&tasklist_lock); 1795 } 1796 1797 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 1798 do { 1799 schedule(); 1800 } while (try_to_freeze()); 1801 1802 tracehook_finish_jctl(); 1803 current->exit_code = 0; 1804 1805 return 1; 1806 } 1807 1808 static int ptrace_signal(int signr, siginfo_t *info, 1809 struct pt_regs *regs, void *cookie) 1810 { 1811 if (!task_ptrace(current)) 1812 return signr; 1813 1814 ptrace_signal_deliver(regs, cookie); 1815 1816 /* Let the debugger run. */ 1817 ptrace_stop(signr, 0, info); 1818 1819 /* We're back. Did the debugger cancel the sig? */ 1820 signr = current->exit_code; 1821 if (signr == 0) 1822 return signr; 1823 1824 current->exit_code = 0; 1825 1826 /* 1827 * Update the siginfo structure if the signal has 1828 * changed. If the debugger wanted something 1829 * specific in the siginfo structure then it should 1830 * have updated *info via PTRACE_SETSIGINFO. 1831 */ 1832 if (signr != info->si_signo) { 1833 info->si_signo = signr; 1834 info->si_errno = 0; 1835 info->si_code = SI_USER; 1836 info->si_pid = task_pid_vnr(current->parent); 1837 info->si_uid = task_uid(current->parent); 1838 } 1839 1840 /* If the (new) signal is now blocked, requeue it. */ 1841 if (sigismember(¤t->blocked, signr)) { 1842 specific_send_sig_info(signr, info, current); 1843 signr = 0; 1844 } 1845 1846 return signr; 1847 } 1848 1849 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, 1850 struct pt_regs *regs, void *cookie) 1851 { 1852 struct sighand_struct *sighand = current->sighand; 1853 struct signal_struct *signal = current->signal; 1854 int signr; 1855 1856 relock: 1857 /* 1858 * We'll jump back here after any time we were stopped in TASK_STOPPED. 
1859 * While in TASK_STOPPED, we were considered "frozen enough". 1860 * Now that we woke up, it's crucial if we're supposed to be 1861 * frozen that we freeze now before running anything substantial. 1862 */ 1863 try_to_freeze(); 1864 1865 spin_lock_irq(&sighand->siglock); 1866 /* 1867 * Every stopped thread goes here after wakeup. Check to see if 1868 * we should notify the parent, prepare_signal(SIGCONT) encodes 1869 * the CLD_ si_code into SIGNAL_CLD_MASK bits. 1870 */ 1871 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { 1872 int why = (signal->flags & SIGNAL_STOP_CONTINUED) 1873 ? CLD_CONTINUED : CLD_STOPPED; 1874 signal->flags &= ~SIGNAL_CLD_MASK; 1875 1876 why = tracehook_notify_jctl(why, CLD_CONTINUED); 1877 spin_unlock_irq(&sighand->siglock); 1878 1879 if (why) { 1880 read_lock(&tasklist_lock); 1881 do_notify_parent_cldstop(current->group_leader, why); 1882 read_unlock(&tasklist_lock); 1883 } 1884 goto relock; 1885 } 1886 1887 for (;;) { 1888 struct k_sigaction *ka; 1889 /* 1890 * Tracing can induce an artificial signal and choose sigaction. 1891 * The return value in @signr determines the default action, 1892 * but @info->si_signo is the signal number we will report. 1893 */ 1894 signr = tracehook_get_signal(current, regs, info, return_ka); 1895 if (unlikely(signr < 0)) 1896 goto relock; 1897 if (unlikely(signr != 0)) 1898 ka = return_ka; 1899 else { 1900 if (unlikely(signal->group_stop_count > 0) && 1901 do_signal_stop(0)) 1902 goto relock; 1903 1904 signr = dequeue_signal(current, ¤t->blocked, 1905 info); 1906 1907 if (!signr) 1908 break; /* will return 0 */ 1909 1910 if (signr != SIGKILL) { 1911 signr = ptrace_signal(signr, info, 1912 regs, cookie); 1913 if (!signr) 1914 continue; 1915 } 1916 1917 ka = &sighand->action[signr-1]; 1918 } 1919 1920 /* Trace actually delivered signals. */ 1921 trace_signal_deliver(signr, info, ka); 1922 1923 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 1924 continue; 1925 if (ka->sa.sa_handler != SIG_DFL) { 1926 /* Run the handler. */ 1927 *return_ka = *ka; 1928 1929 if (ka->sa.sa_flags & SA_ONESHOT) 1930 ka->sa.sa_handler = SIG_DFL; 1931 1932 break; /* will return non-zero "signr" value */ 1933 } 1934 1935 /* 1936 * Now we are doing the default action for this signal. 1937 */ 1938 if (sig_kernel_ignore(signr)) /* Default is nothing. */ 1939 continue; 1940 1941 /* 1942 * Global init gets no signals it doesn't want. 1943 * Container-init gets no signals it doesn't want from same 1944 * container. 1945 * 1946 * Note that if global/container-init sees a sig_kernel_only() 1947 * signal here, the signal must have been generated internally 1948 * or must have come from an ancestor namespace. In either 1949 * case, the signal cannot be dropped. 1950 */ 1951 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && 1952 !sig_kernel_only(signr)) 1953 continue; 1954 1955 if (sig_kernel_stop(signr)) { 1956 /* 1957 * The default action is to stop all threads in 1958 * the thread group. The job control signals 1959 * do nothing in an orphaned pgrp, but SIGSTOP 1960 * always works. Note that siglock needs to be 1961 * dropped during the call to is_orphaned_pgrp() 1962 * because of lock ordering with tasklist_lock. 1963 * This allows an intervening SIGCONT to be posted. 1964 * We need to check for that and bail out if necessary. 
1965 */ 1966 if (signr != SIGSTOP) { 1967 spin_unlock_irq(&sighand->siglock); 1968 1969 /* signals can be posted during this window */ 1970 1971 if (is_current_pgrp_orphaned()) 1972 goto relock; 1973 1974 spin_lock_irq(&sighand->siglock); 1975 } 1976 1977 if (likely(do_signal_stop(info->si_signo))) { 1978 /* It released the siglock. */ 1979 goto relock; 1980 } 1981 1982 /* 1983 * We didn't actually stop, due to a race 1984 * with SIGCONT or something like that. 1985 */ 1986 continue; 1987 } 1988 1989 spin_unlock_irq(&sighand->siglock); 1990 1991 /* 1992 * Anything else is fatal, maybe with a core dump. 1993 */ 1994 current->flags |= PF_SIGNALED; 1995 1996 if (sig_kernel_coredump(signr)) { 1997 if (print_fatal_signals) 1998 print_fatal_signal(regs, info->si_signo); 1999 /* 2000 * If it was able to dump core, this kills all 2001 * other threads in the group and synchronizes with 2002 * their demise. If we lost the race with another 2003 * thread getting here, it set group_exit_code 2004 * first and our do_group_exit call below will use 2005 * that value and ignore the one we pass it. 2006 */ 2007 do_coredump(info->si_signo, info->si_signo, regs); 2008 } 2009 2010 /* 2011 * Death signals, no core dump. 2012 */ 2013 do_group_exit(info->si_signo); 2014 /* NOTREACHED */ 2015 } 2016 spin_unlock_irq(&sighand->siglock); 2017 return signr; 2018 } 2019 2020 void exit_signals(struct task_struct *tsk) 2021 { 2022 int group_stop = 0; 2023 struct task_struct *t; 2024 2025 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { 2026 tsk->flags |= PF_EXITING; 2027 return; 2028 } 2029 2030 spin_lock_irq(&tsk->sighand->siglock); 2031 /* 2032 * From now this task is not visible for group-wide signals, 2033 * see wants_signal(), do_signal_stop(). 2034 */ 2035 tsk->flags |= PF_EXITING; 2036 if (!signal_pending(tsk)) 2037 goto out; 2038 2039 /* 2040 * It could be that __group_complete_signal() choose us to 2041 * notify about group-wide signal. Another thread should be 2042 * woken now to take the signal since we will not. 2043 */ 2044 for (t = tsk; (t = next_thread(t)) != tsk; ) 2045 if (!signal_pending(t) && !(t->flags & PF_EXITING)) 2046 recalc_sigpending_and_wake(t); 2047 2048 if (unlikely(tsk->signal->group_stop_count) && 2049 !--tsk->signal->group_stop_count) { 2050 tsk->signal->flags = SIGNAL_STOP_STOPPED; 2051 group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED); 2052 } 2053 out: 2054 spin_unlock_irq(&tsk->sighand->siglock); 2055 2056 if (unlikely(group_stop)) { 2057 read_lock(&tasklist_lock); 2058 do_notify_parent_cldstop(tsk, group_stop); 2059 read_unlock(&tasklist_lock); 2060 } 2061 } 2062 2063 EXPORT_SYMBOL(recalc_sigpending); 2064 EXPORT_SYMBOL_GPL(dequeue_signal); 2065 EXPORT_SYMBOL(flush_signals); 2066 EXPORT_SYMBOL(force_sig); 2067 EXPORT_SYMBOL(send_sig); 2068 EXPORT_SYMBOL(send_sig_info); 2069 EXPORT_SYMBOL(sigprocmask); 2070 EXPORT_SYMBOL(block_all_signals); 2071 EXPORT_SYMBOL(unblock_all_signals); 2072 2073 2074 /* 2075 * System call entry points. 2076 */ 2077 2078 /** 2079 * sys_restart_syscall - restart a system call 2080 */ 2081 SYSCALL_DEFINE0(restart_syscall) 2082 { 2083 struct restart_block *restart = ¤t_thread_info()->restart_block; 2084 return restart->fn(restart); 2085 } 2086 2087 long do_no_restart_syscall(struct restart_block *param) 2088 { 2089 return -EINTR; 2090 } 2091 2092 /* 2093 * We don't need to get the kernel lock - this is all local to this 2094 * particular thread.. 
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread. (And that's good, because this path is _heavily_
 * used by various programs.)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}

/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @set: new signal mask to apply, interpreted according to @how (may be NULL)
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @set: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
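/*
 * Illustrative user-space usage (not part of this file): the glibc
 * sigprocmask() and sigpending() wrappers typically end up in the two
 * syscalls above.  A blocked signal that is raised stays queued and
 * shows up in the pending set until it is unblocked or fetched:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, pending;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, NULL);	// sys_rt_sigprocmask
 *
 *		raise(SIGUSR1);				// queued, not delivered
 *
 *		sigpending(&pending);			// sys_rt_sigpending
 *		if (sigismember(&pending, SIGUSR1))
 *			printf("SIGUSR1 is pending\n");
 *		return 0;
 *	}
 */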
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
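/*
 * Illustrative user-space view (not part of this file): the union member
 * selected above is what an SA_SIGINFO handler sees in its siginfo_t
 * argument.  For a plain kill(), for example, only the __SI_KILL fields
 * (si_pid/si_uid) carry information.  A minimal sketch:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		printf("sig %d from pid %d uid %d\n",
 *		       sig, (int)si->si_pid, (int)si->si_uid);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *
 *		sa.sa_sigaction = handler;
 *		sa.sa_flags = SA_SIGINFO;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);
 *
 *		raise(SIGUSR1);
 *		return 0;
 *	}
 */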
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/*
			 * None ready -- temporarily unblock the signals we're
			 * interested in while we sleep, so that we'll be
			 * awakened when they arrive.
			 */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
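/*
 * Illustrative user-space usage (not part of this file): sigtimedwait()
 * is the usual way to consume signals synchronously instead of installing
 * a handler.  The signals of interest must be blocked first, otherwise
 * they may be delivered the ordinary way before the call runs:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t si;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGTERM);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *		int sig = sigtimedwait(&set, &si, &ts);
 *		if (sig == SIGTERM)
 *			printf("got SIGTERM from pid %d\n", (int)si.si_pid);
 *		else
 *			perror("sigtimedwait");	// EAGAIN on timeout, EINTR if
 *						// interrupted by another signal
 *		return 0;
 *	}
 */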
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group. This solves
 *  the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

/**
 *  sys_rt_sigqueueinfo - queue a signal and its accompanying info to a process
 *  @pid: the PID of the target process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info.si_code >= 0 || info.si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info.si_code < 0);
		return -EPERM;
	}
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}

long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
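/*
 * Illustrative user-space usage (not part of this file): the glibc
 * sigqueue() wrapper is typically built on rt_sigqueueinfo above and lets
 * the sender attach a word of data that the receiver sees in si_value:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		printf("value %d\n", si->si_value.sival_int);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *		union sigval v = { .sival_int = 42 };
 *
 *		sa.sa_sigaction = handler;
 *		sa.sa_flags = SA_SIGINFO;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGRTMIN, &sa, NULL);
 *
 *		sigqueue(getpid(), SIGRTMIN, v);	// ends up in rt_sigqueueinfo
 *		return 0;
 *	}
 */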
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
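/*
 * Illustrative user-space usage (not part of this file): an alternate
 * signal stack is the standard way to catch a SIGSEGV caused by stack
 * overflow, since the handler cannot run on the stack that just
 * overflowed.  SA_ONSTACK requests delivery on the stack registered
 * through do_sigaltstack() above:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig)
 *	{
 *		_exit(1);			// async-signal-safe
 *	}
 *
 *	int main(void)
 *	{
 *		stack_t ss;
 *		struct sigaction sa;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		ss.ss_size = SIGSTKSZ;		// must be >= MINSIGSTKSZ
 *		ss.ss_flags = 0;
 *		sigaltstack(&ss, NULL);		// ends up in do_sigaltstack()
 *
 *		sa.sa_handler = on_segv;
 *		sa.sa_flags = SA_ONSTACK;	// deliver on the alternate stack
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *		return 0;
 *	}
 */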
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @set: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
		old_sigset_t __user *, oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *			until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
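/*
 * Illustrative user-space usage (not part of this file): sigsuspend()
 * exists to close the race between unblocking a signal and sleeping for
 * it.  The mask swap and the sleep happen atomically with respect to
 * delivery, which is what rt_sigsuspend() above implements with
 * ->saved_sigmask and set_restore_sigmask():
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *	static void handler(int sig) { got_usr1 = 1; }
 *
 *	int main(void)
 *	{
 *		sigset_t block, old;
 *		struct sigaction sa;
 *
 *		sa.sa_handler = handler;
 *		sa.sa_flags = 0;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *
 *		while (!got_usr1)
 *			sigsuspend(&old);	// atomically unblock and wait
 *
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *		return 0;
 *	}
 */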
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d was sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */